diff --git a/.github/workflows/acceptance-tests-pr.yml b/.github/workflows/acceptance-tests-pr.yml new file mode 100644 index 0000000..768276d --- /dev/null +++ b/.github/workflows/acceptance-tests-pr.yml @@ -0,0 +1,23 @@ +name: acceptance-tests-pr + +on: + pull_request: + branches: + - master + +jobs: + run: + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v1 + + - name: Run acceptance tests + run: make github-actions-ci + + - uses: actions/upload-artifact@master + name: Upload test report + with: + name: helm-acceptance-testing-report-${{ github.sha }} + path: acceptance-testing-reports/${{ github.sha }}/ + if: always() diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml new file mode 100644 index 0000000..44e7530 --- /dev/null +++ b/.github/workflows/acceptance-tests.yml @@ -0,0 +1,22 @@ +name: acceptance-tests + +on: + push: + branches: + - master +jobs: + run: + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v1 + + - name: Run acceptance tests + run: make github-actions-ci + + - uses: actions/upload-artifact@master + name: Upload test report + with: + name: helm-acceptance-testing-report-${{ github.sha }} + path: acceptance-testing-reports/${{ github.sha }}/ + if: always() diff --git a/.gitignore b/.gitignore index 56ae284..b9da8cd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ .acceptance/ .idea/ + +acceptance-testing-reports/ +bin/ diff --git a/Makefile b/Makefile index 37a72c5..e175d6b 100644 --- a/Makefile +++ b/Makefile @@ -3,3 +3,16 @@ SHELL = /bin/bash .PHONY: acceptance acceptance: @scripts/acceptance.sh + +.PHONY: github-actions-ci +github-actions-ci: + @scripts/github-actions-ci.sh + +.PHONY: github-actions-ci-local +github-actions-ci-local: + docker run -it --rm \ + -v $(shell pwd):/tmp/acceptance-testing \ + -w /tmp/acceptance-testing \ + --privileged -v /var/run/docker.sock:/var/run/docker.sock \ + --entrypoint=/bin/bash ubuntu:latest \ + -c 'set +e; scripts/github-actions-ci.sh; echo "Exited $?. (Ctrl+D to exit shell)"; bash' \ No newline at end of file diff --git a/README.md b/README.md index ea889d9..2848f19 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,51 @@ # Helm Acceptance Tests -*Note: these tests have only been run against Helm 3 ([dev-v3](https://github.com/helm/helm/tree/dev-v3))* +[![GitHub Actions status](https://github.com/helm/acceptance-testing/workflows/acceptance-tests/badge.svg)](https://github.com/helm/acceptance-testing/actions) This repo contains the source for Helm acceptance tests. - The tests are written using [Robot Framework](https://robotframework.org/). +*Note: these tests have only been run against Helm 3 ([dev-v3](https://github.com/helm/helm/tree/dev-v3))* + +## Test Summary + +### Kubernetes Versions + +Helm is tested to work against the following versions of Kubernetes: + + + +- [1.15.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md) +- [1.14.6](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md) + +Test suite: [kubernetes_versions.robot](./testsuites/kubernetes_versions.robot) + + +### Shell Completion + +Helm's shell completion functionality is tested against the following shells: + +- Bash +- Zsh + +Test suite: [shells.robot](./testsuites/shells.robot) + +### Helm Repositories + +Basic functionality of the chart repository subsystem is tested. 
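+As a rough sketch of what this covers (illustrative commands only; the repo
+name here is hypothetical, and the authoritative cases live in the suite below):
+
+```
+helm repo add mytest https://charts.example.com
+helm repo list
+helm repo remove mytest
+```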
+
+Test suite: [repos.robot](./testsuites/repos.robot)
+
 ## System requirements
 
 The following tools/commands are expected to be present on the base system
@@ -25,6 +65,13 @@ From the root of this repo, run the following:
 make acceptance
 ```
 
+Alternatively, if you have Docker installed,
+the system requirements above are not needed, and you can run the following
+command, which simulates CI:
+```
+make github-actions-ci-local
+```
+
 Note: by default, the tests will use helm as found on your PATH. To specify
 a different helm to test, set and export the `ROBOT_HELM_PATH` environment
 variable. For example, if you have helm v2 installed, but want
@@ -107,7 +154,6 @@ contains a base class called `CommandRunner` that you will likely want to
 leverage when adding support for a new external tool.
 
 The test run is wrapped by [acceptance.sh](./scripts/acceptance.sh) -
-in this file the environment is validated (i.e. check if required tools present).
-
-sinstalled (including Robot Framework itself). If any additional Python libraries
-are required for a new library, it can be appended to `ROBOT_PY_REQUIRES`.
+in this file the environment is validated (i.e. it checks that required tools are present).
+If any additional Python libraries are required for a new library,
+they can be appended to `ROBOT_PY_REQUIRES`.
diff --git a/lib/Helm.py b/lib/Helm.py
index 88b965d..71eae8b 100644
--- a/lib/Helm.py
+++ b/lib/Helm.py
@@ -5,15 +5,7 @@ TEST_CHARTS_ROOT_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +'/../testdata/charts')
 
 class Helm(common.CommandRunner):
-    def list_releases(self):
-        cmd = 'helm list'
-        self.run_command(kind_auth_wrap(cmd))
-
     def install_test_chart(self, release_name, test_chart, extra_args):
         chart_path = TEST_CHARTS_ROOT_DIR+'/'+test_chart
         cmd = 'helm install '+release_name+' '+chart_path+' '+extra_args
-        self.run_command(kind_auth_wrap(cmd))
-
-    def delete_release(self, release_name):
-        cmd = 'helm delete '+release_name
-        self.run_command(kind_auth_wrap(cmd))
+        self.run_command(kind_auth_wrap(cmd))
\ No newline at end of file
diff --git a/lib/Kind.py b/lib/Kind.py
index 8129f3b..90e0215 100644
--- a/lib/Kind.py
+++ b/lib/Kind.py
@@ -9,7 +9,7 @@ MAX_WAIT_KIND_NODE_SECONDS = 60
 KIND_NODE_INTERVAL_SECONDS = 2
 
-MAX_WAIT_KIND_POD_SECONDS = 60
+MAX_WAIT_KIND_POD_SECONDS = 120
 KIND_POD_INTERVAL_SECONDS = 2
 
 KIND_POD_EXPECTED_NUMBER = 8
@@ -39,6 +39,12 @@ def create_test_cluster_with_kubernetes_version(self, kube_version):
             cmd += ' --image='+DOCKER_HUB_REPO+':v'+kube_version
         self.run_command(cmd)
 
+        # Fix for running kind in docker, switch the port+IP in the kubeconfig
+        if os.path.exists('/.dockerenv'):
+            print('Running in Docker, modifying IP in kubeconfig')
+            fixcmd = 'export KIND_IP=$(docker inspect '+LAST_CLUSTER_NAME+'-control-plane | grep \'IPAddress": "\' | head -1 | awk \'{print $2}\' | tr -d \\",) && sed -i "s/https:\/\/127\.0\.0\.1:.*/https:\/\/${KIND_IP}:6443/" $(kind get kubeconfig-path --name="'+LAST_CLUSTER_NAME+'")'
+            self.run_command(fixcmd)
+
     def delete_test_cluster(self):
         if LAST_CLUSTER_EXISTING:
             print('Not deleting cluster (cluster existed prior to test run)')
diff --git a/lib/Kubectl.py b/lib/Kubectl.py
index 8bb7c0e..ca0ead5 100644
--- a/lib/Kubectl.py
+++ b/lib/Kubectl.py
@@ -2,22 +2,6 @@ from Kind import kind_auth_wrap
 
 class Kubectl(common.CommandRunner):
-    def get_nodes(self):
-        cmd = 'kubectl get nodes'
-        self.run_command(kind_auth_wrap(cmd))
-
-    def get_pods(self, namespace):
-        cmd = 'kubectl get pods --namespace='+namespace
-        self.run_command(kind_auth_wrap(cmd))
-
-    def get_services(self, namespace):
-        cmd = 'kubectl get services --namespace='+namespace
-        self.run_command(kind_auth_wrap(cmd))
-
-    def get_persistent_volume_claims(self, namespace):
-        cmd = 'kubectl get pvc --namespace='+namespace
-        self.run_command(kind_auth_wrap(cmd))
-
     def service_has_ip(self, namespace, service_name):
         cmd = 'kubectl get services --namespace='+namespace
         cmd += ' | grep '+service_name
diff --git a/lib/Sh.py b/lib/Sh.py
index 89f0308..d3484c8 100644
--- a/lib/Sh.py
+++ b/lib/Sh.py
@@ -22,7 +22,10 @@ class Sh(common.CommandRunner):
     def require_cluster(self, require):
         global needs_cluster
-        needs_cluster = require
+        if require == "True" or require == "true":
+            needs_cluster = True
+        else:
+            needs_cluster = False
 
     def wrap(self, cmd):
         global needs_cluster
diff --git a/scripts/acceptance.sh b/scripts/acceptance.sh
index 0ce9593..c44e0e5 100755
--- a/scripts/acceptance.sh
+++ b/scripts/acceptance.sh
@@ -1,4 +1,18 @@
 #!/bin/bash -e
+#
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 # Turn on debug printouts if the user requested a debug level >= $1
 set_shell_debug_level()
@@ -39,18 +53,35 @@ set_shell_debug_level 2
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 cd $DIR/../
 
-# We force the tests to use a directory of our own choosing
-# to make sure that when we wipe it clean, we don't wipe
-# some directory that was important to the user.
-# Don't use .helm as it may collide with a valid directory
-FINAL_DIR_NAME=".helm_acceptance_tests"
-
 # Acceptance test configurables
 ROBOT_PY_REQUIRES="${ROBOT_PY_REQUIRES:-robotframework==3.1.2}"
-ROBOT_OUTPUT_DIR="${ROBOT_OUTPUT_DIR:-${PWD}/.acceptance}"
-ROBOT_HELM_HOME_DIR="${ROBOT_HELM_HOME_DIR:-${ROBOT_OUTPUT_DIR}}/${FINAL_DIR_NAME}"
+export ROBOT_OUTPUT_DIR="${ROBOT_OUTPUT_DIR:-${PWD}/.acceptance}"
 ROBOT_VENV_DIR="${ROBOT_VENV_DIR:-${ROBOT_OUTPUT_DIR}/.venv}"
 
+set_shell_debug_level 3
+echo "=============================================================================="
+echo "Available configuration variables:"
+echo "ROBOT_DEBUG_LEVEL - Choose debug level (0 to 3)."
+echo "                    Current: ${ROBOT_DEBUG_LEVEL}"
+echo "ROBOT_HELM_PATH   - The directory where the helm to test can be found."
+echo "                    Current: ${ROBOT_HELM_PATH:-Helm as found on \$PATH: $(dirname $(which helm))}/helm"
+echo "ROBOT_RUN_TESTS   - Comma-separated list of *.robot files to execute."
+echo "                    Current: ${ROBOT_RUN_TESTS:-unset (all)}"
+echo "ROBOT_OUTPUT_DIR  - The output directory for robot to use."
+echo "                    Current: ${ROBOT_OUTPUT_DIR}"
+echo "ROBOT_VENV_DIR    - The directory to be used for virtualenv."
+echo "                    Current: ${ROBOT_VENV_DIR}"
+echo "ROBOT_PY_REQUIRES - Space-separated list of python packages to install (including the robot framework)."
+echo "                    Current: ${ROBOT_PY_REQUIRES}"
+echo "=============================================================================="
+set_shell_debug_level 2
+
+# Only use the -d flag for mktemp as many other flags don't
+# work on every platform
+mkdir -p ${ROBOT_OUTPUT_DIR}
+export TMP_DIR="$(mktemp -d ${ROBOT_OUTPUT_DIR}/helm-acceptance.XXXXXX)"
+trap "rm -rf ${TMP_DIR}" EXIT
+
 SUITES_TO_RUN=""
 # Allow to specify which test suites to run in a space-separated or comma-separated list
 for suite in ${ROBOT_RUN_TESTS/,/ }; do
@@ -69,21 +100,9 @@ if [ ! -z "${ROBOT_HELM_PATH}" ]; then
 fi
 export PATH="${ROBOT_VENV_DIR}/bin:${PATH}"
 
-set_shell_debug_level 3
-# A bit of safety before wiping the entire directory
-if [ $(basename ${ROBOT_HELM_HOME_DIR}) == "${FINAL_DIR_NAME}" ]; then
-    rm -rf ${ROBOT_HELM_HOME_DIR}
-else
-    echo "ABORT: should not delete unexpected directory ${ROBOT_HELM_HOME_DIR}"
-    echo "ABORT: error in acceptance-testing code!"
-    echo "Please report a bug at https://github.com/helm/acceptance-testing/issues"
-    exit 1
-fi
-set_shell_debug_level 2
-
-export XDG_CACHE_HOME=${ROBOT_HELM_HOME_DIR}/cache && mkdir -p ${XDG_CACHE_HOME}
-export XDG_CONFIG_HOME=${ROBOT_HELM_HOME_DIR}/config && mkdir -p ${XDG_CONFIG_HOME}
-export XDG_DATA_HOME=${ROBOT_HELM_HOME_DIR}/data && mkdir -p ${XDG_DATA_HOME}
+export XDG_CACHE_HOME=${TMP_DIR}/cache && mkdir -p ${XDG_CACHE_HOME}
+export XDG_CONFIG_HOME=${TMP_DIR}/config && mkdir -p ${XDG_CONFIG_HOME}
+export XDG_DATA_HOME=${TMP_DIR}/data && mkdir -p ${XDG_DATA_HOME}
 
 # We fully support helm v3 and partially support helm v2 at this time.
 # To figure out which version of helm is used, we run 'helm version'
@@ -94,7 +113,7 @@ export XDG_DATA_HOME=${ROBOT_HELM_HOME_DIR}/data && mkdir -p ${XDG_DATA_HOME}
 # contact the cluster, which may not be accessible, and the command
 # will timeout.
 set_shell_debug_level 3
-if helm version -c &> /dev/null; then
+if helm version -c --tls &> /dev/null; then
    echo "===================="
    echo "Running with Helm v2"
    echo "===================="
diff --git a/scripts/completion-tests/completionTests.sh b/scripts/completion-tests/completionTests.sh
index c8c20b4..8bb2bd6 100755
--- a/scripts/completion-tests/completionTests.sh
+++ b/scripts/completion-tests/completionTests.sh
@@ -17,24 +17,25 @@
 # This script tests different scenarios of completion. The tests can be
 # run by sourcing this file from a bash shell or a zsh shell.
 
-source /tmp/helm-acceptance-shell-completion-tests/lib/completionTests-base.sh
+source ${COMP_DIR}/lib/completionTests-base.sh
 
-# Don't use the new source <() form as it does not work with bash v3
-source /dev/stdin <<- EOF
-   $(helm completion $SHELL_TYPE)
-EOF
+export PATH=${COMP_DIR}/bin:$PATH
+
+# Use the memory driver with pre-defined releases to easily
+# test release name completion
+export HELM_DRIVER=memory
+export HELM_MEMORY_DRIVER_DATA=${COMP_DIR}/releases.yaml
 
 # Helm setup
-HELM_ROOT=/tmp/helm-acceptance-tests-helm-config
 if [ !
-z ${ROBOT_HELM_V3} ]; then - export XDG_CACHE_HOME=${XDG_CACHE_HOME:-${HELM_ROOT}/cache} && mkdir -p ${XDG_CACHE_HOME} - export XDG_CONFIG_HOME=${XDG_CONFIG_HOME:-${HELM_ROOT}/config} && mkdir -p ${XDG_CONFIG_HOME} - export XDG_DATA_HOME=${XDG_DATA_HOME:-${HELM_ROOT}/data} && mkdir -p ${XDG_DATA_HOME} + export XDG_CACHE_HOME=${COMP_DIR}/cache && rm -rf ${XDG_CACHE_HOME} && mkdir -p ${XDG_CACHE_HOME} + export XDG_CONFIG_HOME=${COMP_DIR}/config && rm -rf ${XDG_CONFIG_HOME} && mkdir -p ${XDG_CONFIG_HOME} + export XDG_DATA_HOME=${COMP_DIR}/data && rm -rf ${XDG_DATA_HOME} && mkdir -p ${XDG_DATA_HOME} REPO_ROOT=${XDG_CONFIG_HOME}/helm PLUGIN_ROOT=${XDG_DATA_HOME}/helm/plugins else - export HELM_HOME=${HELM_ROOT} + export HELM_HOME=${COMP_DIR}/.helm && rm -rf ${HELM_HOME} && mkdir -p ${HELM_HOME} helm init --client-only REPO_ROOT=${HELM_HOME}/repository @@ -51,51 +52,143 @@ generated: "2019-08-11T22:28:44.841141-04:00" repositories: - name: stable url: https://kubernetes-charts.storage.googleapis.com -- name: test1 +- name: zztest1 url: https://charts.example.com -- name: test2 +- name: zztest2 url: https://charts2.example.com EOF helm repo list +# Fetch the details of the stable repo +helm repo update # Setup some plugins to allow testing completion of the helm plugin command # We inject the content of different plugin.yaml files directly to avoid having # to install a real plugin which can take a long time. -PLUGIN_DIR=${PLUGIN_ROOT}/helm-template + +########### +# Plugin 1 +########### +PLUGIN_DIR=${PLUGIN_ROOT}/helm-2to3 mkdir -p ${PLUGIN_DIR} +# The plugin file cat > ${PLUGIN_DIR}/plugin.yaml << EOF -name: "template" +name: "2to3" version: "2.5.1+2" -description: "Render templates on the local client." +description: "Migrate from helm v2 to helm v3" +EOF + +# The plugin's static completion file +cat > ${PLUGIN_DIR}/completion.yaml << EOF +commands: +- name: cleanup + flags: + - r + - label + - cleanup + - s + - storage +- name: convert + flags: + - l + - label + - s + - storage + - t +- name: move + commands: + - name: config + flags: + - dry-run +EOF + +# The plugin's dynamic completion file +cat > ${PLUGIN_DIR}/plugin.complete << EOF +#!/usr/bin/env sh + +if [ "\$2" = "config" ]; then + echo "case-config" + echo "gryffindor slytherin ravenclaw hufflepuff" + echo ":0" + exit +fi + +if [ "\$HELM_NAMESPACE" != "default" ]; then + echo "case-ns" + # Check the namespace flag is not passed + echo "\$1" + # Check plugin variables are set + echo "\$HELM_NAMESPACE" + echo ":4" + exit +fi + +if [ "\$2" = -s ]; then + echo "case-flag" + echo "lucius draco dobby" + echo ":4" + exit +fi + +# Check missing directive +echo "hermione harry ron" EOF +chmod u+x ${PLUGIN_DIR}/plugin.complete +########### +# Plugin 2 +########### PLUGIN_DIR=${PLUGIN_ROOT}/helm-push mkdir -p ${PLUGIN_DIR} +# The plugin file cat > ${PLUGIN_DIR}/plugin.yaml << EOF name: "push" version: "0.7.1" description: "Push chart package to ChartMuseum" EOF +########### +# Plugin 3 +########### PLUGIN_DIR=${PLUGIN_ROOT}/helm-push-artifactory mkdir -p ${PLUGIN_DIR} +# The plugin file cat > ${PLUGIN_DIR}/plugin.yaml << EOF name: "push-artifactory" version: "0.3.0" description: "Push helm charts to artifactory" EOF + helm plugin list +# Source the completion script after setting things up, so it can +# take the configuration into consideration (such as plugin names) +# Don't use the new source <() form as it does not work with bash v3 +source /dev/stdin <<- EOF + $(helm completion $SHELL_TYPE) +EOF + +allHelmCommands="completion create 
dependency env 2to3 get history install lint list package plugin pull push push-artifactory repo rollback search show status template test uninstall upgrade verify version" +if [ "$SHELL_TYPE" = bash ]; then + allHelmGlobalFlags="--add-dir-header --alsologtostderr --debug --kube-apiserver --kube-apiserver= --kube-context --kube-context= --kube-token --kube-token= --kubeconfig --kubeconfig= --log-backtrace-at --log-backtrace-at= --log-dir --log-dir= --log-file --log-file-max-size --log-file-max-size= --log-file= --logtostderr --namespace --namespace= --registry-config --registry-config= --repository-cache --repository-cache= --repository-config --repository-config= --skip-headers --skip-log-headers --stderrthreshold --stderrthreshold= --v --v= --vmodule --vmodule= -n -v" + allHelmLongFlags="--add-dir-header --alsologtostderr --debug --kube-apiserver --kube-apiserver= --kube-context --kube-context= --kube-token --kube-token= --kubeconfig --kubeconfig= --log-backtrace-at --log-backtrace-at= --log-dir --log-dir= --log-file --log-file-max-size --log-file-max-size= --log-file= --logtostderr --namespace --namespace= --registry-config --registry-config= --repository-cache --repository-cache= --repository-config --repository-config= --skip-headers --skip-log-headers --stderrthreshold --stderrthreshold= --v --v= --vmodule --vmodule=" +else + allHelmGlobalFlags="--add-dir-header --alsologtostderr --debug --kube-apiserver --kube-apiserver --kube-apiserver --kube-context --kube-context --kube-context --kube-token --kube-token --kube-token --kubeconfig --kubeconfig --kubeconfig --log-backtrace-at --log-backtrace-at --log-backtrace-at --log-dir --log-dir --log-dir --log-file --log-file --log-file --log-file-max-size --log-file-max-size --log-file-max-size --logtostderr --namespace --namespace --namespace --registry-config --registry-config --registry-config --repository-cache --repository-cache --repository-cache --repository-config --repository-config --repository-config --skip-headers --skip-log-headers --stderrthreshold --stderrthreshold --stderrthreshold --v --v --v --vmodule --vmodule --vmodule -n -v" + allHelmLongFlags="--add-dir-header --alsologtostderr --debug --kube-apiserver --kube-apiserver --kube-apiserver --kube-context --kube-context --kube-context --kube-token --kube-token --kube-token --kubeconfig --kubeconfig --kubeconfig --log-backtrace-at --log-backtrace-at --log-backtrace-at --log-dir --log-dir --log-dir --log-file --log-file --log-file --log-file-max-size --log-file-max-size --log-file-max-size --logtostderr --namespace --namespace --namespace --registry-config --registry-config --registry-config --repository-cache --repository-cache --repository-cache --repository-config --repository-config --repository-config --skip-headers --skip-log-headers --stderrthreshold --stderrthreshold --stderrthreshold --v --v --v --vmodule --vmodule --vmodule" +fi + ##################### # Static completions ##################### -# No need to test every command, as completion is handled -# automatically by Cobra. -# We focus on some smoke tests for the Cobra-handled completion -# and also on code specific to this project. 
- # Basic first level commands (static completion) +_completionTests_verifyCompletion "helm " "$allHelmCommands" +_completionTests_verifyCompletion "helm sho" "show" +_completionTests_verifyCompletion "helm --debug " "$allHelmCommands" +_completionTests_verifyCompletion "helm --debug sho" "show" +_completionTests_verifyCompletion "helm -n ns " "$allHelmCommands" +_completionTests_verifyCompletion "helm -n ns sho" "show" +_completionTests_verifyCompletion "helm --namespace ns " "$allHelmCommands" +_completionTests_verifyCompletion "helm --namespace ns sho" "show" _completionTests_verifyCompletion "helm stat" "status" _completionTests_verifyCompletion "helm status" "status" _completionTests_verifyCompletion "helm lis" "list" @@ -109,48 +202,393 @@ fi # Basic second level commands (static completion) if [ ! -z ${ROBOT_HELM_V3} ]; then - _completionTests_verifyCompletion "helm get " "hooks manifest values" + _completionTests_verifyCompletion "helm get " "all hooks manifest notes values" else - _completionTests_verifyCompletion "helm get " "hooks manifest notes values" + _completionTests_verifyCompletion "helm get " "all hooks manifest notes values" fi _completionTests_verifyCompletion "helm get h" "hooks" _completionTests_verifyCompletion "helm completion " "bash zsh" _completionTests_verifyCompletion "helm completion z" "zsh" +_completionTests_verifyCompletion "helm plugin " "install list uninstall update" +_completionTests_verifyCompletion "helm plugin u" "uninstall update" +_completionTests_verifyCompletion "helm --debug plugin " "install list uninstall update" +_completionTests_verifyCompletion "helm --debug plugin u" "uninstall update" +_completionTests_verifyCompletion "helm -n ns plugin " "install list uninstall update" +_completionTests_verifyCompletion "helm -n ns plugin u" "uninstall update" +_completionTests_verifyCompletion "helm --namespace ns plugin " "install list uninstall update" +_completionTests_verifyCompletion "helm --namespace ns plugin u" "uninstall update" +_completionTests_verifyCompletion "helm plugin --debug " "install list uninstall update" +_completionTests_verifyCompletion "helm plugin --debug u" "uninstall update" +_completionTests_verifyCompletion "helm plugin -n ns " "install list uninstall update" +_completionTests_verifyCompletion "helm plugin -n ns u" "uninstall update" +_completionTests_verifyCompletion "helm plugin --namespace ns " "install list uninstall update" +_completionTests_verifyCompletion "helm plugin --namespace ns u" "uninstall update" + +# With validArgs +_completionTests_verifyCompletion "helm completion " "bash zsh" +_completionTests_verifyCompletion "helm completion z" "zsh" +_completionTests_verifyCompletion "helm --debug completion " "bash zsh" +_completionTests_verifyCompletion "helm --debug completion z" "zsh" +_completionTests_verifyCompletion "helm -n ns completion " "bash zsh" +_completionTests_verifyCompletion "helm -n ns completion z" "zsh" +_completionTests_verifyCompletion "helm --namespace ns completion " "bash zsh" +_completionTests_verifyCompletion "helm --namespace ns completion z" "zsh" # Completion of flags -_completionTests_verifyCompletion ZFAIL "helm --kube-con" "--kube-context= --kube-context" -_completionTests_verifyCompletion ZFAIL "helm --kubecon" "--kubeconfig= --kubeconfig" +if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm --kube-con" "--kube-context= --kube-context" + _completionTests_verifyCompletion "helm --kubecon" "--kubeconfig= --kubeconfig" +else + _completionTests_verifyCompletion 
"helm --kube-con" "--kube-context --kube-context --kube-context" + _completionTests_verifyCompletion "helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig" +fi if [ ! -z ${ROBOT_HELM_V3} ]; then _completionTests_verifyCompletion "helm -v" "-v" - _completionTests_verifyCompletion ZFAIL "helm --v" "--v= --vmodule= --v --vmodule" - _completionTests_verifyCompletion ZFAIL "helm --name" "--namespace= --namespace" + if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm --v" "--v= --vmodule= --v --vmodule" + _completionTests_verifyCompletion "helm --name" "--namespace= --namespace" + else + _completionTests_verifyCompletion "helm --v" "--v --vmodule --v --vmodule --v --vmodule" + _completionTests_verifyCompletion "helm --name" "--namespace --namespace --namespace" + fi +fi + +_completionTests_verifyCompletion "helm -" "$allHelmGlobalFlags" +_completionTests_verifyCompletion "helm --" "$allHelmLongFlags" +_completionTests_verifyCompletion "helm show -" "$allHelmGlobalFlags" +_completionTests_verifyCompletion "helm show --" "$allHelmLongFlags" + +if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm --s" "--skip-headers --skip-log-headers --stderrthreshold --stderrthreshold=" + _completionTests_verifyCompletion "helm show --s" "--skip-headers --skip-log-headers --stderrthreshold --stderrthreshold=" +else + _completionTests_verifyCompletion "helm --s" "--skip-headers --skip-log-headers --stderrthreshold --stderrthreshold --stderrthreshold" + _completionTests_verifyCompletion "helm show --s" "--skip-headers --skip-log-headers --stderrthreshold --stderrthreshold --stderrthreshold" fi +_completionTests_verifyCompletion "helm -n" "-n" +_completionTests_verifyCompletion "helm show -n" "-n" + # Completion of commands while using flags _completionTests_verifyCompletion "helm --kube-context prod sta" "status" -_completionTests_verifyCompletion ZFAIL "helm --kubeconfig=/tmp/config lis" "list" -_completionTests_verifyCompletion ZFAIL "helm get hooks --kubec" "--kubeconfig= --kubeconfig" +_completionTests_verifyCompletion "helm --kubeconfig=/tmp/config lis" "list" +if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm get hooks --kubec" "--kubeconfig= --kubeconfig" +else + _completionTests_verifyCompletion "helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig" +fi if [ ! -z ${ROBOT_HELM_V3} ]; then _completionTests_verifyCompletion "helm --namespace mynamespace get h" "hooks" - _completionTests_verifyCompletion KFAIL "helm -v get " "hooks manifest values" - _completionTests_verifyCompletion ZFAIL "helm get --name" "--namespace= --namespace" + _completionTests_verifyCompletion "helm -v 3 get " "all hooks manifest notes values" + if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm get --name" "--namespace= --namespace" + else + _completionTests_verifyCompletion "helm get --name" "--namespace --namespace --namespace" + fi fi -# Alias completion -# Does not work. 
-_completionTests_verifyCompletion KFAIL "helm ls" "ls" -_completionTests_verifyCompletion KFAIL "helm dependenci" "dependencies" +# Cobra command aliases are purposefully not completed +_completionTests_verifyCompletion "helm ls" "" +_completionTests_verifyCompletion "helm dependenci" "" + +# Static completion for plugins +_completionTests_verifyCompletion "helm push " "" +_completionTests_verifyCompletion "helm 2to3 " "cleanup convert move" +_completionTests_verifyCompletion "helm 2to3 c" "cleanup convert" +_completionTests_verifyCompletion "helm 2to3 move " "config" + +_completionTests_verifyCompletion "helm 2to3 cleanup -" "$allHelmGlobalFlags -r -s --label --cleanup --storage" +# For plugin completion, when there are more short flags than long flags, a long flag is created for the extra short flags +# So here we expect the extra --t +_completionTests_verifyCompletion "helm 2to3 convert -" "$allHelmGlobalFlags -l -s -t --t --label --storage" +_completionTests_verifyCompletion "helm 2to3 move config --" "$allHelmLongFlags --dry-run" ##################### # Dynamic completions ##################### +# For release name completion +_completionTests_verifyCompletion "helm status " "athos porthos aramis" +_completionTests_verifyCompletion "helm history a" "athos aramis" +_completionTests_verifyCompletion "helm uninstall a" "athos aramis" +_completionTests_verifyCompletion "helm upgrade a" "athos aramis" +_completionTests_verifyCompletion "helm get manifest -n default " "athos porthos aramis" +_completionTests_verifyCompletion "helm --namespace gascony get manifest " "dartagnan" +_completionTests_verifyCompletion "helm --namespace gascony test d" "dartagnan" +_completionTests_verifyCompletion "helm rollback d" "" + # For the repo command -_completionTests_verifyCompletion "helm repo remove " "stable test1 test2" -_completionTests_verifyCompletion "helm repo remove test" "test1 test2" +_completionTests_verifyCompletion "helm repo remove " "stable zztest1 zztest2" +_completionTests_verifyCompletion "helm repo remove zztest" "zztest1 zztest2" +if [ ! -z ${ROBOT_HELM_V3} ]; then + # Make sure completion works as expected when there are no repositories configured + tmp=$XDG_CONFIG_HOME + XDG_CONFIG_HOME='/invalid/path' _completionTests_verifyCompletion "helm repo remove " "" + XDG_CONFIG_HOME=$tmp +fi # For the plugin command -_completionTests_verifyCompletion "helm plugin remove " "template push push-artifactory" -_completionTests_verifyCompletion "helm plugin remove pu" "push push-artifactory" -_completionTests_verifyCompletion "helm plugin update " "template push push-artifactory" +_completionTests_verifyCompletion "helm plugin uninstall " "2to3 push push-artifactory" +_completionTests_verifyCompletion "helm plugin uninstall pu" "push push-artifactory" +_completionTests_verifyCompletion "helm plugin update " "2to3 push push-artifactory" _completionTests_verifyCompletion "helm plugin update pus" "push push-artifactory" +if [ ! -z ${ROBOT_HELM_V3} ]; then + # Make sure completion works as expected when there are no plugins + tmp=$XDG_DATA_HOME + XDG_DATA_HOME='/invalid/path' _completionTests_verifyCompletion "helm plugin uninstall " "" + XDG_DATA_HOME=$tmp +fi + +# For the global --kube-context flag +if [ ! 
-z ${ROBOT_HELM_V3} ]; then + # Feature not available in v2 + _completionTests_verifyCompletion "helm --kube-context " "dev1 dev2 accept prod" + _completionTests_verifyCompletion "helm upgrade --kube-context " "dev1 dev2 accept prod" + _completionTests_verifyCompletion "helm upgrade --kube-context d" "dev1 dev2" + if [ "$SHELL_TYPE" = bash ]; then + _completionTests_verifyCompletion "helm --kube-context=" "dev1 dev2 accept prod" + else + _completionTests_verifyCompletion "helm --kube-context=" "--kube-context=dev1 --kube-context=dev2 --kube-context=accept --kube-context=prod" + fi +fi + +# Now requires a real cluster +# # For the global --namespace flag +# if [ ! -z ${ROBOT_HELM_V3} ]; then +# # No namespace flag in v2 +# _completionTests_verifyCompletion "helm --namespace " "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm --namespace w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade --namespace " "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm -n " "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm -n w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade -n " "casterly-rock white-harbor winterfell" + +# if [ "$SHELL_TYPE" = bash ]; then +# _completionTests_verifyCompletion "helm --namespace=" "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm --namespace=w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade --namespace=w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade --namespace=" "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm -n=" "casterly-rock white-harbor winterfell" +# _completionTests_verifyCompletion "helm -n=w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade -n=w" "white-harbor winterfell" +# _completionTests_verifyCompletion "helm upgrade -n=" "casterly-rock white-harbor winterfell" +# else +# _completionTests_verifyCompletion "helm --namespace=" "--namespace=casterly-rock --namespace=white-harbor --namespace=winterfell" +# _completionTests_verifyCompletion "helm --namespace=w" "--namespace=white-harbor --namespace=winterfell" +# _completionTests_verifyCompletion "helm upgrade --namespace=w" "--namespace=white-harbor --namespace=winterfell" +# _completionTests_verifyCompletion "helm upgrade --namespace=" "--namespace=casterly-rock --namespace=white-harbor --namespace=winterfell" +# _completionTests_verifyCompletion "helm -n=" "-n=casterly-rock -n=white-harbor -n=winterfell" +# _completionTests_verifyCompletion "helm -n=w" "-n=white-harbor -n=winterfell" +# _completionTests_verifyCompletion "helm upgrade -n=w" "-n=white-harbor -n=winterfell" +# _completionTests_verifyCompletion "helm upgrade -n=" "-n=casterly-rock -n=white-harbor -n=winterfell" +# fi + +# # With override flags +# _completionTests_verifyCompletion "helm --kubeconfig myconfig --namespace " "meereen myr volantis" +# _completionTests_verifyCompletion "helm --kubeconfig=myconfig --namespace " "meereen myr volantis" +# _completionTests_verifyCompletion "helm --kube-context mycontext --namespace " "braavos old-valyria yunkai" +# _completionTests_verifyCompletion "helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai" +# fi +# For the --output flag that applies to multiple commands +if [ ! 
-z ${ROBOT_HELM_V3} ]; then + # Feature not available in v2 + + # Also test that the list of outputs matches what the helm message gives. + # This is an imperfect way of detecting if the output format list has changed, but + # the completion wasn't updated to match. + outputFormats=$(helm repo list -h | grep -- --output | cut -d: -f2 | cut -d '(' -f1 | sed s/,//g) + _completionTests_verifyCompletion "helm repo list --output " "${outputFormats}" + _completionTests_verifyCompletion "helm install --output " "${outputFormats}" + _completionTests_verifyCompletion "helm history -o " "${outputFormats}" + _completionTests_verifyCompletion "helm list -o " "${outputFormats}" +fi + +# For completing specification of charts +if [ ! -z ${ROBOT_HELM_V3} ]; then + tmpFiles="zztest2file files" + touch $tmpFiles + + _completionTests_verifyCompletion "helm show values " "./ / zztest1/ zztest2/ stable/ file:// http:// https://" + _completionTests_verifyCompletion "helm show values ht" "http:// https://" + _completionTests_verifyCompletion "helm show values zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm show values zztest2" "zztest2/ zztest2file" + _completionTests_verifyCompletion "helm show values zztest2f" "" + _completionTests_verifyCompletion "helm show values stable/yyy" "" + _completionTests_verifyCompletion "helm show values stable/z" "stable/zeppelin stable/zetcd" + _completionTests_verifyCompletion "helm show values fil" "file:// files" + + _completionTests_verifyCompletion "helm show chart zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm show readme zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm show values zz" "zztest1/ zztest2/ zztest2file" + + _completionTests_verifyCompletion "helm pull " "zztest1/ zztest2/ stable/ file:// http:// https://" + _completionTests_verifyCompletion "helm pull zz" "zztest1/ zztest2/" + + _completionTests_verifyCompletion "helm install name " "./ / zztest1/ zztest2/ stable/ file:// http:// https://" + _completionTests_verifyCompletion "helm install name zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm install name stable/z" "stable/zeppelin stable/zetcd" + + _completionTests_verifyCompletion "helm template name " "./ / zztest1/ zztest2/ stable/ file:// http:// https://" + _completionTests_verifyCompletion "helm template name zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm template name stable/z" "stable/zeppelin stable/zetcd" + + _completionTests_verifyCompletion "helm upgrade release " "./ / zztest1/ zztest2/ stable/ file:// http:// https://" + _completionTests_verifyCompletion "helm upgrade release zz" "zztest1/ zztest2/ zztest2file" + _completionTests_verifyCompletion "helm upgrade release stable/z" "stable/zeppelin stable/zetcd" + + _completionTests_verifyCompletion "helm show values stab" "stable/ stable/." 
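+    # A sketch of how a further chart-reference case could be added (kept
+    # commented out; the expected string must match the completion output
+    # exactly, in sorted order, mirroring the stable/z cases above):
+    #   _completionTests_verifyCompletion "helm pull stable/ze" "stable/zeppelin stable/zetcd"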
+
+    \rm $tmpFiles
+fi
+
+# Dynamic completion for plugins
+_completionTests_verifyCompletion "helm push " ""
+_completionTests_verifyCompletion "helm 2to3 move config g" "gryffindor"
+_completionTests_verifyCompletion "helm 2to3 -n dumbledore convert " "case-ns convert dumbledore"
+_completionTests_verifyCompletion "helm 2to3 convert -s flag d" "dobby draco"
+_completionTests_verifyCompletion "helm 2to3 convert " "hermione harry ron"
+
+##############################################################
+# Completion with helm called through an alias or using a path
+##############################################################
+
+# We want to specify a different helm for completion than the one
+# that is found on the PATH variable.
+# This is particularly valuable to check that dynamic completion
+# uses the correct location for helm.
+
+# Copy helm to a location that is not on PATH
+TMP_HELM_DIR=$(mktemp -d ${ROBOT_OUTPUT_DIR}/helm-acceptance-temp-bin.XXXXXX)
+trap "rm -rf ${TMP_HELM_DIR}" EXIT
+
+mkdir -p $TMP_HELM_DIR
+HELM_DIR=$(dirname $(which helm))
+cp $HELM_DIR/helm $TMP_HELM_DIR/helm
+
+# Make 'helm' unavailable to make sure it can't be called directly
+# by the dynamic completion code, which should instead use the helm
+# as called in the completion calls that follow.
+alias helm=echo
+
+# Testing with shell aliases is only applicable to bash.
+# Zsh replaces the alias before calling the completion function,
+# so it does not make sense to try zsh completion with an alias.
+if [ "$SHELL_TYPE" = bash ]; then
+
+    # Create aliases to helm
+    # This alias will be created after the variable is expanded
+    alias helmAlias="${TMP_HELM_DIR}/helm"
+    # This alias will be created without expanding the variable (because of single quotes)
+    alias helmAliasWithVar='${TMP_HELM_DIR}/helm'
+
+    # Hook these new aliases to the helm completion function.
+    complete -o default -F $(_completionTests_findCompletionFunction helm) helmAlias
+    complete -o default -F $(_completionTests_findCompletionFunction helm) helmAliasWithVar
+
+    # Completion with normal alias
+    _completionTests_verifyCompletion "helmAlias lis" "list"
+    _completionTests_verifyCompletion "helmAlias completion z" "zsh"
+    _completionTests_verifyCompletion "helmAlias --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "helmAlias get hooks --kubec" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "helmAlias repo remove zztest" "zztest1 zztest2"
+    _completionTests_verifyCompletion "helmAlias plugin update pus" "push push-artifactory"
+    _completionTests_verifyCompletion "helmAlias upgrade --kube-context d" "dev1 dev2"
+    # if [ ! -z ${ROBOT_HELM_V3} ]; then
+    #     _completionTests_verifyCompletion "helmAlias --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+    # fi
+
+    # Completion with alias that contains a variable
+    _completionTests_verifyCompletion "helmAliasWithVar lis" "list"
+    _completionTests_verifyCompletion "helmAliasWithVar completion z" "zsh"
+    _completionTests_verifyCompletion "helmAliasWithVar --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "helmAliasWithVar get hooks --kubec" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "helmAliasWithVar repo remove zztest" "zztest1 zztest2"
+    _completionTests_verifyCompletion "helmAliasWithVar plugin update pus" "push push-artifactory"
+    _completionTests_verifyCompletion "helmAliasWithVar upgrade --kube-context d" "dev1 dev2"
+    # if [ ! -z ${ROBOT_HELM_V3} ]; then
+    #     _completionTests_verifyCompletion "helmAliasWithVar --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+    # fi
+fi
+
+# Completion with absolute path
+_completionTests_verifyCompletion "$TMP_HELM_DIR/helm lis" "list"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/helm completion z" "zsh"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/helm repo remove zztest" "zztest1 zztest2"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/helm plugin update pus" "push push-artifactory"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/helm upgrade --kube-context d" "dev1 dev2"
+# if [ ! -z ${ROBOT_HELM_V3} ]; then
+#     _completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+# fi
+if [ "$SHELL_TYPE" = bash ]; then
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/helm get hooks --kubec" "--kubeconfig= --kubeconfig"
+else
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
+fi
+
+# Completion with relative path
+cd $TMP_HELM_DIR
+_completionTests_verifyCompletion "./helm lis" "list"
+_completionTests_verifyCompletion "./helm completion z" "zsh"
+_completionTests_verifyCompletion "./helm repo remove zztest" "zztest1 zztest2"
+_completionTests_verifyCompletion "./helm plugin update pus" "push push-artifactory"
+_completionTests_verifyCompletion "./helm upgrade --kube-context d" "dev1 dev2"
+# if [ ! -z ${ROBOT_HELM_V3} ]; then
+#     _completionTests_verifyCompletion "./helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+# fi
+if [ "$SHELL_TYPE" = bash ]; then
+    _completionTests_verifyCompletion "./helm --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "./helm get hooks --kubec" "--kubeconfig= --kubeconfig"
+else
+    _completionTests_verifyCompletion "./helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
+    _completionTests_verifyCompletion "./helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
+fi
+cd - >/dev/null
+
+# Completion with a different name for helm
+mv $TMP_HELM_DIR/helm $TMP_HELM_DIR/myhelm
+
+# Generating the completion script using the new binary name
+# should make completion work for that binary name
+source /dev/stdin <<- EOF
+   $(${TMP_HELM_DIR}/myhelm completion $SHELL_TYPE)
+EOF
+_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm lis" "list"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm completion z" "zsh"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm repo remove zztest" "zztest1 zztest2"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm plugin update pus" "push push-artifactory"
+_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm upgrade --kube-context d" "dev1 dev2"
+# if [ ! -z ${ROBOT_HELM_V3} ]; then
+#     _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+# fi
+if [ "$SHELL_TYPE" = bash ]; then
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm get hooks --kubec" "--kubeconfig= --kubeconfig"
+else
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
+    _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
+fi
+
+# Completion with a different name for helm that is on PATH
+mv $TMP_HELM_DIR/myhelm $HELM_DIR/myhelm
+_completionTests_verifyCompletion "myhelm lis" "list"
+_completionTests_verifyCompletion "myhelm completion z" "zsh"
+_completionTests_verifyCompletion "myhelm repo remove zztest" "zztest1 zztest2"
+_completionTests_verifyCompletion "myhelm plugin update pus" "push push-artifactory"
+_completionTests_verifyCompletion "myhelm upgrade --kube-context d" "dev1 dev2"
+# if [ ! -z ${ROBOT_HELM_V3} ]; then
+#     _completionTests_verifyCompletion "myhelm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
+# fi
+if [ "$SHELL_TYPE" = bash ]; then
+    _completionTests_verifyCompletion "myhelm --kubecon" "--kubeconfig= --kubeconfig"
+    _completionTests_verifyCompletion "myhelm get hooks --kubec" "--kubeconfig= --kubeconfig"
+else
+    _completionTests_verifyCompletion "myhelm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
+    _completionTests_verifyCompletion "myhelm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
+fi
+unalias helm
+
+# This must be the last call. It allows the script to exit with a code
+# that reflects the final status of all the tests.
+_completionTests_exit
diff --git a/scripts/completion-tests/lib/completionTests-base.sh b/scripts/completion-tests/lib/completionTests-base.sh
index 3554d46..8ca3f52 100755
--- a/scripts/completion-tests/lib/completionTests-base.sh
+++ b/scripts/completion-tests/lib/completionTests-base.sh
@@ -53,6 +53,7 @@ _completionTests_verifyCompletion() {
    local cmdLine=$1
    local expected=$2
+   local currentFailure=0
 
    result=$(_completionTests_complete "${cmdLine}")
 
@@ -69,6 +70,7 @@
       ([ $expectedFailure = "ZFAIL" ] && [ $SHELL_TYPE = "zsh" ]); then
       if [ "$result" = "$expected" ]; then
          _completionTests_TEST_FAILED=1
+         currentFailure=1
          echo "UNEXPECTED SUCCESS: \"$cmdLine\" completes to \"$resultOut\""
       else
          echo "$expectedFailure: \"$cmdLine\" should complete to \"$expected\" but we got \"$resultOut\""
@@ -77,12 +79,11 @@
       echo "SUCCESS: \"$cmdLine\" completes to \"$resultOut\""
    else
       _completionTests_TEST_FAILED=1
-      echo "FAIL: \"$cmdLine\" should complete to \"$expected\" but we got \"$result\""
+      currentFailure=1
+      echo "ERROR: \"$cmdLine\" should complete to \"$expected\" but we got \"$result\""
    fi
 
-   # Return the global result each time. This allows for the very last call to
-   # this method to return the correct success or failure code for the entire script
-   return $_completionTests_TEST_FAILED
+   return $currentFailure
 }
 
 _completionTests_disable_sort() {
@@ -95,16 +96,24 @@ _completionTests_enable_sort() {
 
 _completionTests_sort() {
    if [ -n "${_completionTests_DISABLE_SORT}" ]; then
-      echo "$1"
+      # We use printf instead of echo as the $1 could be -n which would be
+      # interpreted as an argument to echo
+      printf "%s\n" "$1"
    else
-      echo $(echo "$1" | tr ' ' '\n' | sort -n)
+      # We use printf instead of echo as the $1 could be -n which would be
+      # interpreted as an argument to echo
+      printf "%s\n" "$1" | sed -e 's/^ *//' -e 's/ *$//' | tr ' ' '\n' | sort -n | tr '\n' ' '
    fi
 }
 
 # Find the completion function associated with the binary.
-# $1 is the name of the binary for which completion was triggered.
+# $1 is the first argument of the line to complete which allows
+# us to find the existing completion function name.
 _completionTests_findCompletionFunction() {
-   local out=($(complete -p $1))
+   binary=$(basename $1)
+   # The below must work for both bash and zsh
+   # which is why we use grep as complete -p $binary only works for bash
+   local out=($(complete -p | grep ${binary}$))
    local returnNext=0
    for i in ${out[@]}; do
      if [ $returnNext -eq 1 ]; then
@@ -118,6 +127,7 @@
 _completionTests_complete() {
    local cmdLine=$1
+
    # Set the bash completion variables which are
    # used for both bash and zsh completion
    COMP_LINE=${cmdLine}
@@ -132,10 +142,20 @@
    [ "${cmdLine: -1}" = " " ] && COMP_CWORD=${#COMP_WORDS[@]}
 
    # Call the completion function associated with the binary being called.
-   eval $(_completionTests_findCompletionFunction ${COMP_WORDS[0]})
+   # Also redirect stderr to stdout so that the tests fail if anything is printed
+   # to stderr.
+   eval $(_completionTests_findCompletionFunction ${COMP_WORDS[0]}) 2>&1
 
    # Return the result of the completion.
-   echo "${COMPREPLY[@]}"
+   # We use printf instead of echo as the first completion could be -n which
+   # would be interpreted as an argument to echo
+   printf "%s\n" "${COMPREPLY[@]}"
+}
+
+_completionTests_exit() {
+   # Return the global result each time. This allows for the very last call to
+   # this method to return the correct success or failure code for the entire script
+   return $_completionTests_TEST_FAILED
 }
 
 # compopt, which is only available for bash 4, I believe,
@@ -155,6 +175,11 @@ if [ ! -z "$BASH_VERSION" ];then
    echo "Running completions tests on $(uname) with bash $BASH_VERSION"
    echo "===================================================="
 
+   # Enable aliases to work even though we are in a script (non-interactive shell).
+   # This allows testing completion with aliases.
+   # Only needed for bash; zsh does this automatically.
+   shopt -s expand_aliases
+
    bashCompletionScript="/usr/share/bash-completion/bash_completion"
    if [ $(uname) = "Darwin" ]; then
       bashCompletionScript="/usr/local/etc/bash_completion"
diff --git a/scripts/completion-tests/releases.yaml b/scripts/completion-tests/releases.yaml
new file mode 100644
index 0000000..fef79f4
--- /dev/null
+++ b/scripts/completion-tests/releases.yaml
@@ -0,0 +1,43 @@
+# This file can be used as input to create test releases:
+# HELM_MEMORY_DRIVER_DATA=./testdata/releases.yaml HELM_DRIVER=memory helm list --all-namespaces
+- name: athos
+  version: 1
+  namespace: default
+  info:
+    status: deployed
+  chart:
+    metadata:
+      name: athos-chart
+      version: 1.0.0
+      appversion: 1.1.0
+- name: porthos
+  version: 2
+  namespace: default
+  info:
+    status: deployed
+  chart:
+    metadata:
+      name: porthos-chart
+      version: 0.2.0
+      appversion: 0.2.2
+- name: aramis
+  version: 3
+  namespace: default
+  info:
+    status: deployed
+  chart:
+    metadata:
+      name: aramis-chart
+      version: 0.0.3
+      appversion: 3.0.3
+- name: dartagnan
+  version: 4
+  namespace: gascony
+  info:
+    status: deployed
+  chart:
+    metadata:
+      name: dartagnan-chart
+      version: 0.4.4
+      appversion: 4.4.4
+
diff --git a/scripts/completion-tests/test-completion.sh b/scripts/completion-tests/test-completion.sh
index 0e08903..a6ae6f3 100755
--- a/scripts/completion-tests/test-completion.sh
+++ b/scripts/completion-tests/test-completion.sh
@@ -20,6 +20,17 @@ set -e
 
 SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
 
+# TODO: this is redeclared, but shouldn't have to be?
+# getting error "scripts/completion-tests/test-completion.sh: line 23: set_shell_debug_level: command not found"
+set_shell_debug_level()
+{
+    set +x
+    if [ $ROBOT_DEBUG_LEVEL -ge $1 ]; then
+        set -x
+    fi
+}
+export -f set_shell_debug_level
+
 set_shell_debug_level 2
 
 BINARY_NAME=helm
@@ -32,22 +43,68 @@ if [ -z $(which docker) ]; then
    exit 2;
 fi
 
-COMP_DIR=/tmp/helm-acceptance-shell-completion-tests
+# Only use the -d flag for mktemp as many other flags don't
+# work on every platform
+export COMP_DIR=$(mktemp -d ${ROBOT_OUTPUT_DIR}/helm-acceptance-completion.XXXXXX)
+trap "rm -rf ${COMP_DIR}" EXIT
 COMP_SCRIPT_NAME=completionTests.sh
 COMP_SCRIPT=${COMP_DIR}/${COMP_SCRIPT_NAME}
 
 rm -rf ${COMP_DIR}
 mkdir -p ${COMP_DIR}/lib
+mkdir -p ${COMP_DIR}/bin
 cp ${SCRIPT_DIR}/${COMP_SCRIPT_NAME} ${COMP_DIR}
 cp ${SCRIPT_DIR}/lib/completionTests-base.sh ${COMP_DIR}/lib
-
-if ! [ -f ${BINARY_PATH_DOCKER}/${BINARY_NAME} ]; then
-   echo "These tests require a helm binary located at ${BINARY_PATH_DOCKER}/${BINARY_NAME}"
-   echo "Hint: Run 'make build-cross' in a clone of helm repo"
-   exit 2
+cp ${SCRIPT_DIR}/releases.yaml ${COMP_DIR}
+
+if [[ "${GITHUB_SHA}" == "" ]]; then
+  CHECK_BINARY_PATH="$(cd ${BINARY_PATH_DOCKER} && pwd)/${BINARY_NAME}"
+  if [[ ! -f ${CHECK_BINARY_PATH} ]] && [[ -L ${CHECK_BINARY_PATH} ]]; then
+    echo "These tests require a helm binary located at ${CHECK_BINARY_PATH}"
+    echo "Hint: Run 'make build-cross' in a clone of helm repo"
+    exit 2
+  fi
+  cp ${CHECK_BINARY_PATH} ${COMP_DIR}/bin
+else
+  echo "Running on GitHub Actions CI - using system-wide Helm 3 binary."
+ cp $(which helm-docker) ${COMP_DIR}/bin/helm fi -cp ${BINARY_PATH_DOCKER}/${BINARY_NAME} ${COMP_DIR} + +# config file stubs +cat > ${COMP_DIR}/config.dev1 << EOF +kind: Config +apiVersion: v1 +contexts: +- context: + name: dev1 +current-context: dev1 +EOF +cat > ${COMP_DIR}/config.dev2 << EOF +kind: Config +apiVersion: v1 +contexts: +- context: + name: dev2 +current-context: dev2 +EOF +cat > ${COMP_DIR}/config.accept << EOF +kind: Config +apiVersion: v1 +contexts: +- context: + name: accept +current-context: accept +EOF +cat > ${COMP_DIR}/config.prod << EOF +kind: Config +apiVersion: v1 +contexts: +- context: + name: prod +current-context: prod +EOF +export KUBECONFIG=${COMP_DIR}/config.dev1:${COMP_DIR}/config.dev2:${COMP_DIR}/config.accept:${COMP_DIR}/config.prod # Now run all tests, even if there is a failure. # But remember if there was any failure to report it at the end. @@ -63,12 +120,14 @@ BASH4_IMAGE=completion-bash4 echo;echo; docker build -t ${BASH4_IMAGE} - <<- EOF FROM bash:4.4 - RUN apk update && apk add bash-completion + RUN apk update && apk add bash-completion ca-certificates EOF docker run --rm \ - -v ${COMP_DIR}:${COMP_DIR} -v ${COMP_DIR}/${BINARY_NAME}:/bin/${BINARY_NAME} \ + -v ${COMP_DIR}:${COMP_DIR} \ -e ROBOT_HELM_V3=${ROBOT_HELM_V3} \ -e ROBOT_DEBUG_LEVEL=${ROBOT_DEBUG_LEVEL} \ + -e COMP_DIR=${COMP_DIR} \ + -e KUBECONFIG=${KUBECONFIG} \ ${BASH4_IMAGE} bash -c "source ${COMP_SCRIPT}" ######################################## @@ -82,18 +141,40 @@ BASH3_IMAGE=completion-bash3 echo;echo; docker build -t ${BASH3_IMAGE} - <<- EOF FROM bash:3.2 + RUN apk update && apk add ca-certificates # For bash 3.2, the bash-completion package required is version 1.3 RUN mkdir /usr/share/bash-completion && \ wget -qO - https://github.com/scop/bash-completion/archive/1.3.tar.gz | \ tar xvz -C /usr/share/bash-completion --strip-components 1 bash-completion-1.3/bash_completion EOF docker run --rm \ - -v ${COMP_DIR}:${COMP_DIR} -v ${COMP_DIR}/${BINARY_NAME}:/bin/${BINARY_NAME} \ + -v ${COMP_DIR}:${COMP_DIR} \ -e BASH_COMPLETION=/usr/share/bash-completion \ -e ROBOT_HELM_V3=${ROBOT_HELM_V3} \ -e ROBOT_DEBUG_LEVEL=${ROBOT_DEBUG_LEVEL} \ + -e COMP_DIR=${COMP_DIR} \ + -e KUBECONFIG=${KUBECONFIG} \ ${BASH3_IMAGE} bash -c "source ${COMP_SCRIPT}" +######################################## +# Bash centos completion tests +# https://github.com/helm/helm/pull/7304 +######################################## +BASH_IMAGE=completion-bash-centos + +echo;echo; +docker build -t ${BASH_IMAGE} - <<- EOF + FROM centos + RUN yum install -y bash-completion which +EOF +docker run --rm \ + -v ${COMP_DIR}:${COMP_DIR} \ + -e ROBOT_HELM_V3=${ROBOT_HELM_V3} \ + -e ROBOT_DEBUG_LEVEL=${ROBOT_DEBUG_LEVEL} \ + -e COMP_DIR=${COMP_DIR} \ + -e KUBECONFIG=${KUBECONFIG} \ + ${BASH_IMAGE} bash -c "source ${COMP_SCRIPT}" + ######################################## # Zsh completion tests ######################################## @@ -102,11 +183,15 @@ ZSH_IMAGE=completion-zsh echo;echo; docker build -t ${ZSH_IMAGE} - <<- EOF FROM zshusers/zsh:5.7 + # This will install the SSL certificates necessary for helm repo update to work + RUN apt-get update && apt-get install -y wget EOF docker run --rm \ - -v ${COMP_DIR}:${COMP_DIR} -v ${COMP_DIR}/${BINARY_NAME}:/bin/${BINARY_NAME} \ + -v ${COMP_DIR}:${COMP_DIR} \ -e ROBOT_HELM_V3=${ROBOT_HELM_V3} \ -e ROBOT_DEBUG_LEVEL=${ROBOT_DEBUG_LEVEL} \ + -e COMP_DIR=${COMP_DIR} \ + -e KUBECONFIG=${KUBECONFIG} \ ${ZSH_IMAGE} zsh -c "source ${COMP_SCRIPT}" ######################################## @@ 
-118,12 +203,14 @@ ZSH_IMAGE=completion-zsh-alpine echo;echo; docker build -t ${ZSH_IMAGE} - <<- EOF FROM alpine - RUN apk update && apk add zsh + RUN apk update && apk add zsh ca-certificates EOF docker run --rm \ - -v ${COMP_DIR}:${COMP_DIR} -v ${COMP_DIR}/${BINARY_NAME}:/bin/${BINARY_NAME} \ + -v ${COMP_DIR}:${COMP_DIR} \ -e ROBOT_HELM_V3=${ROBOT_HELM_V3} \ -e ROBOT_DEBUG_LEVEL=${ROBOT_DEBUG_LEVEL} \ + -e COMP_DIR=${COMP_DIR} \ + -e KUBECONFIG=${KUBECONFIG} \ ${ZSH_IMAGE} zsh -c "source ${COMP_SCRIPT}" ######################################## @@ -137,25 +224,22 @@ if [ "$(uname)" == "Darwin" ]; then echo "Attempting local completion tests on Darwin" echo "====================================================" - # Make sure that for the local tests, the tests will find the newly - # built binary. If for some reason the binary to test is not present - # the tests may use the default binary installed on localhost and we - # won't be testing the right thing. So we check here. - if [ $(PATH=${BINARY_PATH_LOCAL}:$PATH which ${BINARY_NAME}) != ${BINARY_PATH_LOCAL}/${BINARY_NAME} ]; then - echo "Cannot find ${BINARY_NAME} under ${BINARY_PATH_LOCAL}/${BINARY_NAME} although it is what we need to test." - exit 1 + # Copy the local helm to use + if ! cp ${BINARY_PATH_LOCAL}/${BINARY_NAME} ${COMP_DIR}/bin ; then + echo "Cannot find ${BINARY_NAME} under ${BINARY_PATH_LOCAL}/${BINARY_NAME} although it is what we need to test." + exit 1 fi if which bash>/dev/null && [ -f /usr/local/etc/bash_completion ]; then echo;echo; echo "Completion tests for bash running locally" - PATH=${BINARY_PATH_LOCAL}:$PATH bash -c "source ${COMP_SCRIPT}" + bash -c "source ${COMP_SCRIPT}" fi if which zsh>/dev/null; then echo;echo; echo "Completion tests for zsh running locally" - PATH=${BINARY_PATH_LOCAL}:$PATH zsh -c "source ${COMP_SCRIPT}" + zsh -c "source ${COMP_SCRIPT}" fi fi diff --git a/scripts/github-actions-ci.sh b/scripts/github-actions-ci.sh new file mode 100755 index 0000000..1fd8250 --- /dev/null +++ b/scripts/github-actions-ci.sh @@ -0,0 +1,82 @@ +#!/bin/bash -ex +# +# Copyright The Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export KUBECTL_VERSION="v1.16.1" +export KIND_VERSION="v0.5.1" + +rm -rf bin/ +mkdir -p bin/ +export PATH="${PWD}/bin:${HOME}/.local/bin:${PATH}" +export GITHUB_SHA="${GITHUB_SHA:-latest}" + +# Build helm from source +which helm || true +mkdir -p /tmp/gopath/src/helm.sh +pushd /tmp/gopath/src/helm.sh +git clone https://github.com/helm/helm.git -b master +pushd helm/ +GOPATH=/tmp/gopath make build build-cross +popd +popd +mv /tmp/gopath/src/helm.sh/helm/bin/helm bin/helm +mv /tmp/gopath/src/helm.sh/helm/_dist/linux-amd64/helm bin/helm-docker +helm version +which helm + +# These tools appear to be in the GitHub "ubuntu-latest" environment, but not in +# the ubuntu:latest image from Docker Hub +if ! 
diff --git a/scripts/github-actions-ci.sh b/scripts/github-actions-ci.sh
new file mode 100755
index 0000000..1fd8250
--- /dev/null
+++ b/scripts/github-actions-ci.sh
@@ -0,0 +1,82 @@
+#!/bin/bash -ex
+#
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export KUBECTL_VERSION="v1.16.1"
+export KIND_VERSION="v0.5.1"
+
+rm -rf bin/
+mkdir -p bin/
+export PATH="${PWD}/bin:${HOME}/.local/bin:${PATH}"
+export GITHUB_SHA="${GITHUB_SHA:-latest}"
+
+# Build helm from source
+which helm || true
+mkdir -p /tmp/gopath/src/helm.sh
+pushd /tmp/gopath/src/helm.sh
+git clone https://github.com/helm/helm.git -b master
+pushd helm/
+GOPATH=/tmp/gopath make build build-cross
+popd
+popd
+mv /tmp/gopath/src/helm.sh/helm/bin/helm bin/helm
+mv /tmp/gopath/src/helm.sh/helm/_dist/linux-amd64/helm bin/helm-docker
+helm version
+which helm
+
+# These tools appear to be in the GitHub "ubuntu-latest" environment, but not in
+# the ubuntu:latest image from Docker Hub. Install them if any one is missing.
+if ! [[ -x "$(command -v curl)" && -x "$(command -v pip3)" && -x "$(command -v docker)" ]]; then
+    apt-get update
+    apt-get install -y apt-transport-https ca-certificates gnupg-agent software-properties-common curl python3-pip
+
+    # Docker install
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+    apt-get update
+    apt-get install -y docker-ce
+fi
+if ! [[ -x "$(command -v pip)" ]]; then
+    ln -sf $(which pip3) bin/pip
+fi
+
+# Install kubectl
+which kubectl || true
+curl -LO https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl
+chmod +x kubectl
+mv kubectl bin/kubectl
+kubectl version --client
+which kubectl
+
+# Install kind
+which kind || true
+curl -LO https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64
+chmod +x kind-linux-amd64
+mv kind-linux-amd64 bin/kind
+which kind
+
+# Install virtualenv
+which virtualenv || true
+pip3 install --user virtualenv
+virtualenv --version
+which virtualenv
+
+export ROBOT_OUTPUT_DIR="${PWD}/acceptance-testing-reports/${GITHUB_SHA}"
+rm -rf ${ROBOT_OUTPUT_DIR}
+mkdir -p ${ROBOT_OUTPUT_DIR}
+trap "rm -rf ${ROBOT_OUTPUT_DIR}/.venv/" EXIT
+
+# Run
+make acceptance
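The script pins its tool versions via `KUBECTL_VERSION` and `KIND_VERSION` and installs everything into `./bin`, which it prepends to `PATH`. A quick post-install sanity check, as a sketch under those same assumptions:

```
# Verify that helm, kubectl and kind all resolve to the freshly installed ./bin copies
for tool in helm kubectl kind; do
    case "$(command -v "$tool")" in
        "${PWD}/bin/"*) ;;
        *) echo "$tool does not resolve to ./bin" >&2; exit 1 ;;
    esac
done
kubectl version --client
kind version
```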
diff --git a/testsuites/kubernetes_versions.robot b/testsuites/kubernetes_versions.robot
index daf6726..45efa1d 100644
--- a/testsuites/kubernetes_versions.robot
+++ b/testsuites/kubernetes_versions.robot
@@ -1,3 +1,18 @@
+#
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 *** Settings ***
 Documentation     Verify Helm functionality on multiple Kubernetes versions.
 ...
@@ -6,26 +21,33 @@ Documentation     Verify Helm functionality on multiple Kubernetes versions.
 ...               kind cluster can be used by specifying it in an env var
 ...               representing the version, for example:
 ...
-...                 export KIND_CLUSTER_1_14_3="helm-ac-keepalive-1.14.3"
-...                 export KIND_CLUSTER_1_15_0="helm-ac-keepalive-1.15.0"
+...                 export KIND_CLUSTER_1_16_1="helm-ac-keepalive-1.16.1"
+...                 export KIND_CLUSTER_1_15_3="helm-ac-keepalive-1.15.3"
+...                 export KIND_CLUSTER_1_14_6="helm-ac-keepalive-1.14.6"
 ...
 Library           String
 Library           OperatingSystem
 Library           ../lib/Kind.py
 Library           ../lib/Kubectl.py
 Library           ../lib/Helm.py
+Library           ../lib/Sh.py
 Suite Setup       Suite Setup
 Suite Teardown    Suite Teardown
 
 *** Test Cases ***
-Helm works with Kubernetes 1.14.3
-    Test Helm on Kubernetes version    1.14.3
+#Helm works with Kubernetes 1.16.1
+#    Test Helm on Kubernetes version    1.16.1
+
+Helm works with Kubernetes 1.15.3
+    Test Helm on Kubernetes version    1.15.3
 
-Helm works with Kubernetes 1.15.0
-    Test Helm on Kubernetes version    1.15.0
+Helm works with Kubernetes 1.14.6
+    Test Helm on Kubernetes version    1.14.6
 
 *** Keyword ***
 Test Helm on Kubernetes version
+    Require cluster    True
+
     ${helm_version} =    Get Environment Variable    ROBOT_HELM_V3    "v2"
     Pass Execution If    ${helm_version} == 'v2'    Helm v2 not supported. Skipping test.
@@ -41,21 +63,19 @@ Create test cluster with kube version
     [Arguments]    ${kube_version}
     Kind.Create test cluster with Kubernetes version    ${kube_version}
     Kind.Wait for cluster
-    Kubectl.Get nodes
-    Kubectl.Return code should be 0
-    Kubectl.Get pods    kube-system
-    Kubectl.Return code should be 0
+    Should pass    kubectl get nodes
+    Should pass    kubectl get pods --namespace=kube-system
 
 Verify --wait flag works as expected
     # Install nginx chart in a good state, using --wait flag
-    Helm.Delete release    wait-flag-good
+    Sh.Run    helm delete wait-flag-good
     Helm.Install test chart    wait-flag-good    nginx    --wait --timeout=60s
     Helm.Return code should be 0
 
     # Make sure everything is up-and-running
-    Kubectl.Get pods    default
-    Kubectl.Get services    default
-    Kubectl.Get persistent volume claims    default
+    Sh.Run    kubectl get pods --namespace=default
+    Sh.Run    kubectl get services --namespace=default
+    Sh.Run    kubectl get pvc --namespace=default
 
     Kubectl.Service has IP    default    wait-flag-good-nginx
     Kubectl.Return code should be 0
@@ -77,20 +97,19 @@ Verify --wait flag works as expected
     Kubectl.Return code should be 0
 
     # Delete good release
-    Helm.Delete release    wait-flag-good
-    Helm.Return code should be 0
+    Should pass    helm delete wait-flag-good
 
     # Install nginx chart in a bad state, using --wait flag
-    Helm.Delete release    wait-flag-bad
+    Sh.Run    helm delete wait-flag-bad
     Helm.Install test chart    wait-flag-bad    nginx    --wait --timeout=60s --set breakme=true
 
     # Install should return non-zero, as things fail to come up
     Helm.Return code should not be 0
 
     # Make sure things are NOT up-and-running
-    Kubectl.Get pods    default
-    Kubectl.Get services    default
-    Kubectl.Get persistent volume claims    default
+    Sh.Run    kubectl get pods --namespace=default
+    Sh.Run    kubectl get services --namespace=default
+    Sh.Run    kubectl get pvc --namespace=default
 
     Kubectl.Persistent volume claim is bound    default    wait-flag-bad-nginx
     Kubectl.Return code should not be 0
@@ -109,8 +128,7 @@ Verify --wait flag works as expected
     Kubectl.Return code should not be 0
 
     # Delete bad release
-    Helm.Delete release    wait-flag-bad
-    Helm.Return code should be 0
+    Should pass    helm delete wait-flag-bad
 
 Suite Setup
     Kind.Cleanup all test clusters
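As the suite documentation notes, a pre-existing kind cluster can be kept alive and reused across runs by exporting the matching per-version variable. A minimal sketch, assuming kind is installed and the suite's default `kindest/node` images are used:

```
# Create a long-lived cluster for Kubernetes 1.15.3 and point the suite at it
kind create cluster --name helm-ac-keepalive-1.15.3 --image=kindest/node:v1.15.3
export KIND_CLUSTER_1_15_3="helm-ac-keepalive-1.15.3"
make acceptance
```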
diff --git a/testsuites/repos.robot b/testsuites/repos.robot
index 0b3ac9e..daab240 100644
--- a/testsuites/repos.robot
+++ b/testsuites/repos.robot
@@ -16,30 +16,37 @@
 *** Settings ***
 Documentation     Verify helm repo commands work as expected.
 ...
+Library           OperatingSystem
 Library           ../lib/Sh.py
 
 *** Test Cases ***
 No repos provisioned yet
+    Check helm version
     Should fail    helm repo list
     Output contains    Error: no repositories
 
 Add a first valid repo
+    Check helm version
     Should pass    helm repo add gitlab https://charts.gitlab.io
     Output contains    "gitlab" has been added to your repositories
 
 Add invalid repo without protocol
+    Check helm version
     Should fail    helm repo add invalid notAValidURL
     Output contains    Error: could not find protocol handler
 
 Add invalid repo with protocol
+    Check helm version
     Should fail    helm repo add invalid https://example.com
     Output contains    Error: looks like "https://example.com" is not a valid chart repository or cannot be reached
 
 Add a second valid repo
+    Check helm version
     Should pass    helm repo add jfrog https://charts.jfrog.io
     Output contains    "jfrog" has been added to your repositories
 
 Check output of repo list
+    Check helm version
     Should pass    helm repo list
     Output contains    gitlab
     Output contains    https://charts.gitlab.io
@@ -48,38 +55,51 @@ Check output of repo list
     Output does not contain    invalid
 
 Make sure both repos get updated
+    Check helm version
     Should pass    helm repo update
     Output contains    Successfully got an update from the "gitlab" chart repository
     Output contains    Successfully got an update from the "jfrog" chart repository
     Output contains    Update Complete. ⎈ Happy Helming!⎈
 
-Try to remove inexistant repo
+Try to remove nonexistent repo
+    Check helm version
     Should fail    helm repo remove badname
     Output contains    Error: no repo named "badname" found
 
 Remove a repo
+    Check helm version
     Should pass    helm repo remove gitlab
     Output contains    "gitlab" has been removed from your repositories
 
 Make sure repo update will only update the remaining repo
+    Check helm version
     Should pass    helm repo update
     Output contains    Successfully got an update from the "jfrog" chart repository
     Output contains    Update Complete. ⎈ Happy Helming!⎈
 
 Try removing an already removed repo
+    Check helm version
     Should fail    helm repo remove gitlab
     Output contains    Error: no repo named "gitlab" found
 
 Remove last repo
+    Check helm version
     Should pass    helm repo remove jfrog
     Output contains    "jfrog" has been removed from your repositories
 
 Check there are no more repos
+    Check helm version
     Should fail    helm repo list
     Output contains    Error: no repositories to show
 
 Make sure repo update now fails, with a proper message
+    Check helm version
     Should fail    helm repo update
     Output contains    Error: no repositories found. You must add one before updating
 
 # "helm repo index" should also be tested
+
+*** Keyword ***
+Check helm version
+    ${helm_version} =    Get Environment Variable    ROBOT_HELM_V3    "v2"
+    Pass Execution If    ${helm_version} == 'v2'    Helm v2 not supported. Skipping test.
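For quick iteration on this suite alone, Robot Framework can also be invoked directly rather than through the acceptance wrapper (a sketch, assuming `robot` is installed, e.g. via `pip install robotframework`, a Helm 3 binary is on the PATH, and no extra environment setup from the wrapper is needed):

```
export ROBOT_HELM_V3=1
robot --outputdir .acceptance testsuites/repos.robot
```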