diff --git a/.circleci/config.yml b/.circleci/config.yml index aebd5733e542d..c78eb24b4e7d7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,9 +7,7 @@ version: 2 jobs: install-ui-dependencies: docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers + - image: docker.mirror.hashicorp.services/node:14-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /home/circleci/go/src/github.com/hashicorp/vault steps: @@ -30,9 +28,7 @@ jobs: - ui/node_modules test-ui: docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers + - image: docker.mirror.hashicorp.services/node:14-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /home/circleci/go/src/github.com/hashicorp/vault resource_class: xlarge @@ -59,6 +55,18 @@ jobs: at: . - run: command: | + set -x + + # Install Chrome + wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \ + | apt-key add - + echo "deb http://dl.google.com/linux/chrome/deb/ stable main" \ + | tee /etc/apt/sources.list.d/google-chrome.list + apt-get update + apt-get -y install google-chrome-stable + rm /etc/apt/sources.list.d/google-chrome.list + rm -rf /var/lib/apt/lists/* /var/cache/apt/* + # Add ./bin to the PATH so vault binary can be run by Ember tests export PATH="${PWD}/bin:${PATH}" @@ -71,6 +79,25 @@ jobs: path: ui/test-results - store_test_results: path: ui/test-results + test-ui-browserstack: + docker: + - image: docker.mirror.hashicorp.services/node:14-buster + shell: /usr/bin/env bash -euo pipefail -c + working_directory: /home/circleci/go/src/github.com/hashicorp/vault + resource_class: xlarge + steps: + - checkout + - restore_cache: + key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} + name: Restore yarn cache + - attach_workspace: + at: . + - run: + command: | + # Add ./bin to the PATH so vault binary can be found. 
+ export PATH="${PWD}"/bin:${PATH} + make test-ui-browserstack + name: Run Browserstack Tests build-go-dev: machine: image: ubuntu-2004:202201-02 @@ -120,12 +147,12 @@ jobs: environment: - CIRCLECI_CLI_VERSION: 0.1.5546 - GO_TAGS: '' - - GO_VERSION: 1.19.1 - - GOFUMPT_VERSION: 0.3.1 + - GO_VERSION: 1.17.13 + - GOFUMPT_VERSION: 0.2.1 - GOTESTSUM_VERSION: 0.5.2 test-go-remote-docker: docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.1 + - image: docker.mirror.hashicorp.services/cimg/go:1.17.13 resource_class: medium working_directory: /home/circleci/go/src/github.com/hashicorp/vault parallelism: 8 @@ -147,7 +174,7 @@ jobs: version: 18.09.3 - add_ssh_keys: fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 + - 0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06 - run: command: | git config --global url."git@github.com:".insteadOf https://github.com/ @@ -231,7 +258,7 @@ jobs: export VAULT_LICENSE_CI="$VAULT_LICENSE" VAULT_LICENSE= - # Create a docker network for our test container + # Create a docker network for our testcontainer if [ $USE_DOCKER == 1 ]; then # Despite the fact that we're using a circleci image (thus getting the # version they chose for the docker cli) and that we're specifying a @@ -240,17 +267,15 @@ jobs: # reasons unclear. 
export DOCKER_API_VERSION=1.39 - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") + export TEST_DOCKER_NETWORK_ID=$(docker network list -q -f 'name=vaulttest') if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") + TEST_DOCKER_NETWORK_ID=$(docker network create vaulttest) fi - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ + # Start a docker testcontainer to run the tests in + docker run -d \ -e TEST_DOCKER_NETWORK_ID \ -e GOPRIVATE \ -e DOCKER_CERT_PATH \ @@ -259,21 +284,19 @@ jobs: -e DOCKER_TLS_VERIFY \ -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - docker.mirror.hashicorp.services/cimg/go:1.19.1 \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id + --network vaulttest --name \ + testcontainer docker.mirror.hashicorp.services/cimg/go:1.17.13 \ + tail -f /dev/null # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH + test -d /tmp/go-cache && docker cp /tmp/go-cache testcontainer:/tmp/gocache + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' + docker cp . testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/ + docker cp $DOCKER_CERT_PATH/ testcontainer:$DOCKER_CERT_PATH # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/pkg' + docker cp "$(go env GOPATH)/pkg/mod" testcontainer:/home/circleci/go/pkg/mod docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ @@ -282,7 +305,7 @@ jobs: -e GOPROXY="off" \ -e VAULT_LICENSE_CI \ -e GOARCH=amd64 \ - ${CONTAINER_ID} \ + testcontainer \ gotestsum --format=short-verbose \ --junitfile test-results/go-test/results.xml \ --jsonfile test-results/go-test/results.json \ @@ -311,8 +334,8 @@ jobs: no_output_timeout: 60m - run: command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache + docker cp testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/test-results . + docker cp testcontainer:/tmp/gocache /tmp/go-cache name: Copy test results when: always - store_artifacts: @@ -353,7 +376,6 @@ jobs: - checkout - run: command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" make fmt if ! 
git diff --exit-code; then @@ -364,12 +386,12 @@ jobs: environment: - CIRCLECI_CLI_VERSION: 0.1.5546 - GO_TAGS: '' - - GO_VERSION: 1.19.1 - - GOFUMPT_VERSION: 0.3.1 + - GO_VERSION: 1.17.13 + - GOFUMPT_VERSION: 0.2.1 - GOTESTSUM_VERSION: 0.5.2 test-go-race: docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.1 + - image: docker.mirror.hashicorp.services/cimg/go:1.17.13 resource_class: xlarge working_directory: /home/circleci/go/src/github.com/hashicorp/vault parallelism: 8 @@ -388,7 +410,7 @@ jobs: - checkout - add_ssh_keys: fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 + - 0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06 - run: command: | git config --global url."git@github.com:".insteadOf https://github.com/ @@ -471,7 +493,7 @@ jobs: export VAULT_LICENSE_CI="$VAULT_LICENSE" VAULT_LICENSE= - # Create a docker network for our test container + # Create a docker network for our testcontainer if [ $USE_DOCKER == 1 ]; then # Despite the fact that we're using a circleci image (thus getting the # version they chose for the docker cli) and that we're specifying a @@ -480,17 +502,15 @@ jobs: # reasons unclear. 
export DOCKER_API_VERSION=1.39 - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") + export TEST_DOCKER_NETWORK_ID=$(docker network list -q -f 'name=vaulttest') if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") + TEST_DOCKER_NETWORK_ID=$(docker network create vaulttest) fi - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ + # Start a docker testcontainer to run the tests in + docker run -d \ -e TEST_DOCKER_NETWORK_ID \ -e GOPRIVATE \ -e DOCKER_CERT_PATH \ @@ -499,21 +519,19 @@ jobs: -e DOCKER_TLS_VERIFY \ -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - docker.mirror.hashicorp.services/cimg/go:1.19.1 \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id + --network vaulttest --name \ + testcontainer docker.mirror.hashicorp.services/cimg/go:1.17.13 \ + tail -f /dev/null # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH + test -d /tmp/go-cache && docker cp /tmp/go-cache testcontainer:/tmp/gocache + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' + docker cp . testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/ + docker cp $DOCKER_CERT_PATH/ testcontainer:$DOCKER_CERT_PATH # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/pkg' + docker cp "$(go env GOPATH)/pkg/mod" testcontainer:/home/circleci/go/pkg/mod docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ @@ -522,7 +540,7 @@ jobs: -e GOPROXY="off" \ -e VAULT_LICENSE_CI \ -e GOARCH=amd64 \ - ${CONTAINER_ID} \ + testcontainer \ gotestsum --format=short-verbose \ --junitfile test-results/go-test/results.xml \ --jsonfile test-results/go-test/results.json \ @@ -560,7 +578,7 @@ jobs: - GO_TAGS: '' test-go: docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.1 + - image: docker.mirror.hashicorp.services/cimg/go:1.17.13 resource_class: large working_directory: /home/circleci/go/src/github.com/hashicorp/vault parallelism: 8 @@ -579,7 +597,7 @@ jobs: - checkout - add_ssh_keys: fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 + - 0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06 - run: command: | git config --global url."git@github.com:".insteadOf https://github.com/ @@ -662,7 +680,7 @@ jobs: export VAULT_LICENSE_CI="$VAULT_LICENSE" VAULT_LICENSE= - # Create a docker network for our test container + # Create a docker network for our testcontainer if [ $USE_DOCKER == 1 ]; then # Despite the fact that we're using a circleci image (thus getting the # version they chose for the docker cli) and that we're specifying a @@ -671,17 +689,15 @@ jobs: # reasons unclear. 
export DOCKER_API_VERSION=1.39 - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") + export TEST_DOCKER_NETWORK_ID=$(docker network list -q -f 'name=vaulttest') if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") + TEST_DOCKER_NETWORK_ID=$(docker network create vaulttest) fi - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ + # Start a docker testcontainer to run the tests in + docker run -d \ -e TEST_DOCKER_NETWORK_ID \ -e GOPRIVATE \ -e DOCKER_CERT_PATH \ @@ -690,21 +706,19 @@ jobs: -e DOCKER_TLS_VERIFY \ -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - docker.mirror.hashicorp.services/cimg/go:1.19.1 \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id + --network vaulttest --name \ + testcontainer docker.mirror.hashicorp.services/cimg/go:1.17.13 \ + tail -f /dev/null # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH + test -d /tmp/go-cache && docker cp /tmp/go-cache testcontainer:/tmp/gocache + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' + docker cp . testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/ + docker cp $DOCKER_CERT_PATH/ testcontainer:$DOCKER_CERT_PATH # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/pkg' + docker cp "$(go env GOPATH)/pkg/mod" testcontainer:/home/circleci/go/pkg/mod docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ @@ -713,7 +727,7 @@ jobs: -e GOPROXY="off" \ -e VAULT_LICENSE_CI \ -e GOARCH=amd64 \ - ${CONTAINER_ID} \ + testcontainer \ gotestsum --format=short-verbose \ --junitfile test-results/go-test/results.xml \ --jsonfile test-results/go-test/results.json \ @@ -751,15 +765,25 @@ jobs: - GO_TAGS: '' semgrep: docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 + - image: docker.mirror.hashicorp.services/alpine:3.13 shell: /bin/sh working_directory: /home/circleci/go/src/github.com/hashicorp/vault steps: + - run: + command: | + apk add --no-cache python3 py3-pip make + python3 -m pip install --user semgrep==0.86.5 + export PATH="$HOME/.local/bin:$PATH" + + echo "$ semgrep --version" + semgrep --version + name: Setup Semgrep + working_directory: ~/ - checkout - attach_workspace: at: . - run: - command: "# Alpine images can't run the make file due to a bash requirement. Run\n# semgrep explicitly here. \nexport PATH=\"$HOME/.local/bin:$PATH\" \necho -n 'Semgrep Version: '\nsemgrep --version\nsemgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .\n" + command: "# Alpine images can't run the make file due to a bash requirement. Run\n# semgrep explicitly here. 
\nexport PATH=\"$HOME/.local/bin:$PATH\" \nsemgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .\n" name: Run Semgrep Rules pre-flight-checks: machine: @@ -815,7 +839,7 @@ jobs: name: Verify CircleCI - add_ssh_keys: fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 + - 0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06 - run: command: | git config --global url."git@github.com:".insteadOf https://github.com/ @@ -852,12 +876,12 @@ jobs: environment: - CIRCLECI_CLI_VERSION: 0.1.5546 - GO_TAGS: '' - - GO_VERSION: 1.19.1 - - GOFUMPT_VERSION: 0.3.1 + - GO_VERSION: 1.17.13 + - GOFUMPT_VERSION: 0.2.1 - GOTESTSUM_VERSION: 0.5.2 test-go-race-remote-docker: docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.1 + - image: docker.mirror.hashicorp.services/cimg/go:1.17.13 resource_class: medium working_directory: /home/circleci/go/src/github.com/hashicorp/vault parallelism: 8 @@ -879,7 +903,7 @@ jobs: version: 18.09.3 - add_ssh_keys: fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 + - 0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06 - run: command: | git config --global url."git@github.com:".insteadOf https://github.com/ @@ -963,7 +987,7 @@ jobs: export VAULT_LICENSE_CI="$VAULT_LICENSE" VAULT_LICENSE= - # Create a docker network for our test container + # Create a docker network for our testcontainer if [ $USE_DOCKER == 1 ]; then # Despite the fact that we're using a circleci image (thus getting the # version they chose for the docker cli) and that we're specifying a @@ -972,17 +996,15 @@ jobs: # reasons unclear. 
export DOCKER_API_VERSION=1.39 - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") + export TEST_DOCKER_NETWORK_ID=$(docker network list -q -f 'name=vaulttest') if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") + TEST_DOCKER_NETWORK_ID=$(docker network create vaulttest) fi - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ + # Start a docker testcontainer to run the tests in + docker run -d \ -e TEST_DOCKER_NETWORK_ID \ -e GOPRIVATE \ -e DOCKER_CERT_PATH \ @@ -991,21 +1013,19 @@ jobs: -e DOCKER_TLS_VERIFY \ -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - docker.mirror.hashicorp.services/cimg/go:1.19.1 \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id + --network vaulttest --name \ + testcontainer docker.mirror.hashicorp.services/cimg/go:1.17.13 \ + tail -f /dev/null # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH + test -d /tmp/go-cache && docker cp /tmp/go-cache testcontainer:/tmp/gocache + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' + docker cp . testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/ + docker cp $DOCKER_CERT_PATH/ testcontainer:$DOCKER_CERT_PATH # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/pkg' + docker cp "$(go env GOPATH)/pkg/mod" testcontainer:/home/circleci/go/pkg/mod docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ @@ -1014,7 +1034,7 @@ jobs: -e GOPROXY="off" \ -e VAULT_LICENSE_CI \ -e GOARCH=amd64 \ - ${CONTAINER_ID} \ + testcontainer \ gotestsum --format=short-verbose \ --junitfile test-results/go-test/results.xml \ --jsonfile test-results/go-test/results.json \ @@ -1043,8 +1063,8 @@ jobs: no_output_timeout: 60m - run: command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache + docker cp testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/test-results . + docker cp testcontainer:/tmp/gocache /tmp/go-cache name: Copy test results when: always - store_artifacts: @@ -1071,6 +1091,13 @@ workflows: requires: - install-ui-dependencies - build-go-dev + - test-ui-browserstack: + filters: + branches: + ignore: /pull\/[0-9]+/ + requires: + - install-ui-dependencies + - build-go-dev - test-go: requires: - pre-flight-checks diff --git a/.circleci/config/commands/configure-git.yml b/.circleci/config/commands/configure-git.yml index a725ab97e7b93..67fecb15f86c9 100644 --- a/.circleci/config/commands/configure-git.yml +++ b/.circleci/config/commands/configure-git.yml @@ -1,7 +1,6 @@ steps: - add_ssh_keys: fingerprints: - # "CircleCI Additional SSH Key" associated with hc-github-team-secure-vault-core GitHub user - - "b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9" + - "0e:03:77:f4:e2:c3:56:c2:53:6a:03:e1:31:91:2f:06" - run: | git config --global url."git@github.com:".insteadOf https://github.com/ diff --git a/.circleci/config/commands/go_test.yml 
b/.circleci/config/commands/go_test.yml index cc9a0337b81ae..f763c67f67dd5 100644 --- a/.circleci/config/commands/go_test.yml +++ b/.circleci/config/commands/go_test.yml @@ -14,7 +14,7 @@ parameters: default: false go_image: type: string - default: "docker.mirror.hashicorp.services/cimg/go:1.19.1" + default: "docker.mirror.hashicorp.services/cimg/go:1.17.13" use_docker: type: boolean default: false @@ -107,7 +107,7 @@ steps: export VAULT_LICENSE_CI="$VAULT_LICENSE" VAULT_LICENSE= - # Create a docker network for our test container + # Create a docker network for our testcontainer if [ $USE_DOCKER == 1 ]; then # Despite the fact that we're using a circleci image (thus getting the # version they chose for the docker cli) and that we're specifying a @@ -116,17 +116,15 @@ steps: # reasons unclear. export DOCKER_API_VERSION=1.39 - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") + export TEST_DOCKER_NETWORK_ID=$(docker network list -q -f 'name=vaulttest') if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") + TEST_DOCKER_NETWORK_ID=$(docker network create vaulttest) fi - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ + # Start a docker testcontainer to run the tests in + docker run -d \ -e TEST_DOCKER_NETWORK_ID \ -e GOPRIVATE \ -e DOCKER_CERT_PATH \ @@ -135,21 +133,19 @@ steps: -e DOCKER_TLS_VERIFY \ -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=<< parameters.log_dir >> \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - << parameters.go_image >> \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id + --network vaulttest --name \ + testcontainer << parameters.go_image >> \ + tail -f /dev/null # Run tests - test -d << parameters.cache_dir >> && docker cp << parameters.cache_dir >> 
${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH + test -d << parameters.cache_dir >> && docker cp << parameters.cache_dir >> testcontainer:/tmp/gocache + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' + docker cp . testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/ + docker cp $DOCKER_CERT_PATH/ testcontainer:$DOCKER_CERT_PATH # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod + docker exec testcontainer sh -c 'mkdir -p /home/circleci/go/pkg' + docker cp "$(go env GOPATH)/pkg/mod" testcontainer:/home/circleci/go/pkg/mod docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ @@ -158,7 +154,7 @@ steps: -e GOPROXY="off" \ -e VAULT_LICENSE_CI \ -e GOARCH=<< parameters.arch >> \ - ${CONTAINER_ID} \ + testcontainer \ gotestsum --format=short-verbose \ --junitfile test-results/go-test/results.xml \ --jsonfile test-results/go-test/results.json \ @@ -189,8 +185,8 @@ steps: name: Copy test results when: always command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache << parameters.cache_dir >> + docker cp testcontainer:/home/circleci/go/src/github.com/hashicorp/vault/test-results . 
+ docker cp testcontainer:/tmp/gocache << parameters.cache_dir >> - when: condition: << parameters.save_cache >> steps: diff --git a/.circleci/config/commands/setup-semgrep.yml b/.circleci/config/commands/setup-semgrep.yml new file mode 100644 index 0000000000000..b34307d3bc223 --- /dev/null +++ b/.circleci/config/commands/setup-semgrep.yml @@ -0,0 +1,14 @@ +--- +description: > + Ensure semgrep is installed. +steps: + - run: + working_directory: ~/ + name: Setup Semgrep + command: | + apk add --no-cache python3 py3-pip make + python3 -m pip install --user semgrep==0.86.5 + export PATH="$HOME/.local/bin:$PATH" + + echo "$ semgrep --version" + semgrep --version diff --git a/.circleci/config/executors/@executors.yml b/.circleci/config/executors/@executors.yml index 3b9d84d7aa5ce..40e16db67de92 100644 --- a/.circleci/config/executors/@executors.yml +++ b/.circleci/config/executors/@executors.yml @@ -3,18 +3,15 @@ go-machine: image: ubuntu-2004:202201-02 shell: /usr/bin/env bash -euo pipefail -c environment: - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) - GO_VERSION: 1.19.1 # Pin Go to patch version (ex: 1.2.3) - GOTESTSUM_VERSION: 0.5.2 # Pin gotestsum to patch version (ex: 1.2.3) - GOFUMPT_VERSION: 0.3.1 # Pin gofumpt to patch version (ex: 1.2.3) + CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) + GO_VERSION: 1.17.13 # Pin Go to patch version (ex: 1.2.3) + GOTESTSUM_VERSION: 0.5.2 # Pin gotestsum to patch version (ex: 1.2.3) + GOFUMPT_VERSION: 0.2.1 # Pin gofumpt to patch version (ex: 1.2.3) GO_TAGS: "" working_directory: /home/circleci/go/src/github.com/hashicorp/vault node: docker: - - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - environment: - # See https://git.io/vdao3 for details. 
- JOBS: 2 + - image: docker.mirror.hashicorp.services/node:14-buster shell: /usr/bin/env bash -euo pipefail -c working_directory: /home/circleci/go/src/github.com/hashicorp/vault python: @@ -22,32 +19,32 @@ python: - image: docker.mirror.hashicorp.services/python:3-alpine shell: /usr/bin/env bash -euo pipefail -c working_directory: /home/circleci/go/src/github.com/hashicorp/vault -semgrep: +alpine: docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 + - image: docker.mirror.hashicorp.services/alpine:3.13 shell: /bin/sh working_directory: /home/circleci/go/src/github.com/hashicorp/vault docker-env-go-test-remote-docker: resource_class: medium docker: - - image: "docker.mirror.hashicorp.services/cimg/go:1.19.1" + - image: "docker.mirror.hashicorp.services/cimg/go:1.17.13" environment: - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) + CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) GO_TAGS: "" working_directory: /home/circleci/go/src/github.com/hashicorp/vault docker-env-go-test: resource_class: large docker: - - image: "docker.mirror.hashicorp.services/cimg/go:1.19.1" + - image: "docker.mirror.hashicorp.services/cimg/go:1.17.13" environment: - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) + CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) GO_TAGS: "" working_directory: /home/circleci/go/src/github.com/hashicorp/vault docker-env-go-test-race: resource_class: xlarge docker: - - image: "docker.mirror.hashicorp.services/cimg/go:1.19.1" + - image: "docker.mirror.hashicorp.services/cimg/go:1.17.13" environment: - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) + CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) GO_TAGS: "" working_directory: /home/circleci/go/src/github.com/hashicorp/vault diff --git a/.circleci/config/jobs/fmt.yml 
b/.circleci/config/jobs/fmt.yml index c92ce617aa6a0..29a8a70d7693e 100644 --- a/.circleci/config/jobs/fmt.yml +++ b/.circleci/config/jobs/fmt.yml @@ -8,7 +8,6 @@ steps: - run: name: make fmt command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" make fmt if ! git diff --exit-code; then diff --git a/.circleci/config/jobs/semgrep.yml b/.circleci/config/jobs/semgrep.yml index c5cf749e129dd..1f6d5dd1f5adb 100644 --- a/.circleci/config/jobs/semgrep.yml +++ b/.circleci/config/jobs/semgrep.yml @@ -1,6 +1,7 @@ --- -executor: semgrep +executor: alpine steps: + - setup-semgrep - checkout - attach_workspace: at: . @@ -10,6 +11,4 @@ steps: # Alpine images can't run the make file due to a bash requirement. Run # semgrep explicitly here. export PATH="$HOME/.local/bin:$PATH" - echo -n 'Semgrep Version: ' - semgrep --version semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci . diff --git a/.circleci/config/jobs/test-ui-browserstack.yml b/.circleci/config/jobs/test-ui-browserstack.yml new file mode 100644 index 0000000000000..7640f83135a83 --- /dev/null +++ b/.circleci/config/jobs/test-ui-browserstack.yml @@ -0,0 +1,13 @@ +executor: node +resource_class: xlarge +steps: + - checkout + - restore_yarn_cache + - attach_workspace: + at: . + - run: + name: Run Browserstack Tests + command: | + # Add ./bin to the PATH so vault binary can be found. 
+ export PATH="${PWD}"/bin:${PATH} + make test-ui-browserstack diff --git a/.circleci/config/jobs/test-ui.yml b/.circleci/config/jobs/test-ui.yml index f2aa19b0508d2..271e809a04005 100644 --- a/.circleci/config/jobs/test-ui.yml +++ b/.circleci/config/jobs/test-ui.yml @@ -9,6 +9,18 @@ steps: - run: name: Test UI command: | + set -x + + # Install Chrome + wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub \ + | apt-key add - + echo "deb http://dl.google.com/linux/chrome/deb/ stable main" \ + | tee /etc/apt/sources.list.d/google-chrome.list + apt-get update + apt-get -y install google-chrome-stable + rm /etc/apt/sources.list.d/google-chrome.list + rm -rf /var/lib/apt/lists/* /var/cache/apt/* + # Add ./bin to the PATH so vault binary can be run by Ember tests export PATH="${PWD}/bin:${PATH}" diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml index 5e99293d7ea37..3af59741a66b5 100644 --- a/.circleci/config/workflows/ci.yml +++ b/.circleci/config/workflows/ci.yml @@ -14,6 +14,14 @@ jobs: # Only main, UI, release and merge branches need to run UI tests. # We don't filter here however because test-ui is configured in github as # required so it must run, instead we short-circuit within test-ui. 
+ - test-ui-browserstack: + requires: + - install-ui-dependencies + - build-go-dev + filters: + branches: + # Forked pull requests have CIRCLE_BRANCH set to pull/XXX + ignore: /pull\/[0-9]+/ - test-go: requires: - pre-flight-checks @@ -31,5 +39,5 @@ jobs: requires: - pre-flight-checks - semgrep: - requires: - - pre-flight-checks + requires: + - pre-flight-checks diff --git a/.github/enos-run-matrices/ent.json b/.github/enos-run-matrices/ent.json deleted file mode 100644 index d1dafc0fba595..0000000000000 --- a/.github/enos-run-matrices/ent.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms arch:amd64 builder:crt edition:ent", - "aws_region": "us-west-1" - }, - { - "scenario": "smoke backend:raft consul_version:1.12.3 distro:ubuntu seal:shamir arch:amd64 builder:crt edition:ent", - "aws_region": "us-west-2" - }, - { - "scenario": "upgrade backend:raft consul_version:1.11.7 distro:rhel seal:shamir arch:amd64 builder:crt edition:ent", - "aws_region": "us-west-1" - }, - { - "scenario": "upgrade backend:consul consul_version:1.11.7 distro:rhel seal:awskms arch:amd64 builder:crt edition:ent", - "aws_region": "us-west-2" - }, - { - "scenario": "autopilot distro:ubuntu seal:shamir arch:amd64 builder:crt edition:ent", - "aws_region": "us-west-1" - } - ] -} diff --git a/.github/enos-run-matrices/oss.json b/.github/enos-run-matrices/oss.json deleted file mode 100644 index 7ce25ebc49a22..0000000000000 --- a/.github/enos-run-matrices/oss.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms arch:amd64 builder:crt edition:oss", - "aws_region": "us-west-1" - }, - { - "scenario": "smoke backend:raft consul_version:1.12.3 distro:ubuntu seal:shamir arch:amd64 builder:crt edition:oss", - "aws_region": "us-west-2" - }, - { - "scenario": "upgrade backend:raft consul_version:1.11.7 distro:rhel seal:shamir 
arch:amd64 builder:crt edition:oss", - "aws_region": "us-west-1" - }, - { - "scenario": "upgrade backend:consul consul_version:1.11.7 distro:rhel seal:awskms arch:amd64 builder:crt edition:oss", - "aws_region": "us-west-2" - } - ] -} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8cad99db68747..86e505456a98a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,42 +1,41 @@ name: build -on: - workflow_dispatch: - push: - branches-ignore: - - docs/** - - backport/docs/** +on: [ workflow_dispatch, push ] env: PKG_NAME: "vault" GO_TAGS: "ui" jobs: - - product-metadata: + get-product-version: runs-on: ubuntu-latest outputs: product-version: ${{ steps.get-product-version.outputs.product-version }} product-base-version: ${{ steps.get-product-version.outputs.product-base-version }} - build-date: ${{ steps.get-build-date.outputs.build-date }} steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - - name: Get product version + - uses: actions/checkout@v2 + - name: get product version id: get-product-version run: | make version IFS="-" read BASE_VERSION _other <<< "$(make version)" echo "::set-output name=product-version::$(make version)" echo "::set-output name=product-base-version::${BASE_VERSION}" - - name: Get build date + + get-build-date: + runs-on: ubuntu-latest + outputs: + build-date: ${{ steps.get-build-date.outputs.build-date }} + steps: + - uses: actions/checkout@v2 + - name: get build date id: get-build-date run: | make build-date echo "::set-output name=build-date::$(make build-date)" generate-metadata-file: - needs: product-metadata + needs: get-product-version runs-on: ubuntu-latest outputs: filepath: ${{ steps.generate-metadata-file.outputs.filepath }} @@ -47,20 +46,22 @@ jobs: id: generate-metadata-file uses: hashicorp/actions-generate-metadata@v1 with: - version: ${{ needs.product-metadata.outputs.product-version }} + version: ${{ needs.get-product-version.outputs.product-version }} 
product: ${{ env.PKG_NAME }} + - uses: actions/upload-artifact@v2 with: name: metadata.json path: ${{ steps.generate-metadata-file.outputs.filepath }} build-other: - needs: [ product-metadata ] + needs: [ get-product-version, get-build-date ] runs-on: ubuntu-latest strategy: matrix: goos: [ freebsd, windows, netbsd, openbsd, solaris ] goarch: [ "386", "amd64", "arm" ] + go: [ "1.17.13" ] exclude: - goos: solaris goarch: 386 @@ -70,14 +71,14 @@ jobs: goarch: arm fail-fast: true - name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build + name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: - go-version-file: go.mod + go-version: ${{ matrix.go }} - name: Setup node and yarn uses: actions/setup-node@v2 with: @@ -98,31 +99,32 @@ jobs: CGO_ENABLED: 0 run: | mkdir dist out - GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build - zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ + GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build + zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ - uses: actions/upload-artifact@v2 with: - name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip - path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + name: ${{ env.PKG_NAME }}_${{ 
needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip build-linux: - needs: [ product-metadata ] + needs: [ get-product-version, get-build-date ] runs-on: ubuntu-latest strategy: matrix: goos: [linux] goarch: ["arm", "arm64", "386", "amd64"] + go: ["1.17.13"] fail-fast: true - name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build + name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: - go-version-file: go.mod + go-version: ${{ matrix.go }} - name: Setup node and yarn uses: actions/setup-node@v2 with: @@ -143,12 +145,12 @@ jobs: CGO_ENABLED: 0 run: | mkdir dist out - GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build - zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ + GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build + zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ - uses: actions/upload-artifact@v2 with: - name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip - path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ 
matrix.goos }}_${{ matrix.goarch }}.zip + path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip - name: Package uses: hashicorp/actions-packaging-linux@v1 @@ -156,7 +158,7 @@ jobs: name: ${{ github.event.repository.name }} description: "Vault is a tool for secrets management, encryption as a service, and privileged access management." arch: ${{ matrix.goarch }} - version: ${{ needs.product-metadata.outputs.product-version }} + version: ${{ needs.get-product-version.outputs.product-version }} maintainer: "HashiCorp" homepage: "https://github.com/hashicorp/vault" license: "MPL-2.0" @@ -182,21 +184,22 @@ jobs: path: out/${{ env.DEB_PACKAGE }} build-darwin: - needs: [ product-metadata ] + needs: [ get-product-version, get-build-date ] runs-on: macos-latest strategy: matrix: goos: [ darwin ] goarch: [ "amd64", "arm64" ] + go: [ "1.17.13" ] fail-fast: true - name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build + name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: - go-version-file: go.mod + go-version: ${{ matrix.go }} - name: Setup node and yarn uses: actions/setup-node@v2 with: @@ -218,17 +221,17 @@ jobs: CGO_ENABLED: 0 run: | mkdir dist out - GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build - zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ + GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build + zip -r -j out/${{ env.PKG_NAME }}_${{ 
needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ - uses: actions/upload-artifact@v2 with: - name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip - path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip build-docker: name: Docker ${{ matrix.arch }} build needs: - - product-metadata + - get-product-version - build-linux runs-on: ubuntu-latest strategy: @@ -236,7 +239,7 @@ jobs: arch: ["arm", "arm64", "386", "amd64"] env: repo: ${{github.event.repository.name}} - version: ${{needs.product-metadata.outputs.product-version}} + version: ${{needs.get-product-version.outputs.product-version}} steps: - uses: actions/checkout@v2 - name: Docker Build (Action) @@ -245,7 +248,7 @@ jobs: version: ${{env.version}} target: default arch: ${{matrix.arch}} - zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_linux_${{ matrix.arch }}.zip + zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip tags: | docker.io/hashicorp/${{env.repo}}:${{env.version}} public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}} @@ -253,7 +256,7 @@ jobs: build-ubi: name: Red Hat UBI ${{ matrix.arch }} build needs: - - product-metadata + - get-product-version - build-linux runs-on: ubuntu-latest strategy: @@ -261,7 +264,7 @@ jobs: arch: ["amd64"] env: repo: ${{github.event.repository.name}} - version: ${{needs.product-metadata.outputs.product-version}} + version: ${{needs.get-product-version.outputs.product-version}} steps: - uses: 
actions/checkout@v2 - name: Docker Build (Action) @@ -270,22 +273,5 @@ jobs: version: ${{env.version}} target: ubi arch: ${{matrix.arch}} - zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_linux_${{ matrix.arch }}.zip + zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip redhat_tag: scan.connect.redhat.com/ospid-f0a92725-d8c6-4023-9a87-ba785b94c3fd/${{env.repo}}:${{env.version}}-ubi - - enos: - name: Enos - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" - needs: - - product-metadata - - build-linux - uses: ./.github/workflows/enos-run.yml - with: - artifact-name: "vault_${{ needs.product-metadata.outputs.product-version }}_linux_amd64.zip" - secrets: inherit diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index d8a380270b263..9addf91761b0c 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -39,11 +39,11 @@ jobs: if [ -z "$changelog_files" ]; then echo "Not found." - echo "looking for changelog file matching changelog/_go-ver-*.txt" + echo "looking for changelog file matching changelog/go-ver-*.txt" # If we do not find a file matching the PR # in changelog/, we fail the check # unless we did a Go toolchain version update, in which case we check the # alternative name. 
- toolchain_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- 'changelog/_go-ver-*.txt') + toolchain_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- 'changelog/go-ver-*.txt') if [ -z "$toolchain_files" ]; then echo "Not found." echo "" @@ -82,8 +82,8 @@ jobs: exit 1 elif grep -q '^core: Bump Go version' "$changelog_files"; then echo "Don't use PR numbered changelog entries for Go version bumps!" - echo "Please use the format changelog/_go-ver-.txt instead." - echo "Example: _go-ver-1110.txt for Vault 1.11.0" + echo "Please use the format changelog/go-ver-.txt instead." + echo "Example: go-ver-1110.txt for Vault 1.11.0" exit 1 else echo "Found changelog entry in PR!" diff --git a/.github/workflows/enos-fmt.yml b/.github/workflows/enos-fmt.yml deleted file mode 100644 index c10395c8badf8..0000000000000 --- a/.github/workflows/enos-fmt.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: enos_fmt - -on: - pull_request: - paths: - - enos/** - -jobs: - fmt_check: - # Only run this workflow on pull requests from hashicorp/vault branches - # as we need secrets to install enos. - if: "! github.event.pull_request.head.repo.fork" - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v3 - - uses: hashicorp/setup-terraform@v2 - with: - terraform_wrapper: false - - uses: hashicorp/action-setup-enos@v1 - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: "check formatting" - working-directory: ./enos - run: make check-fmt diff --git a/.github/workflows/enos-run.yml b/.github/workflows/enos-run.yml deleted file mode 100644 index c2adca32a782e..0000000000000 --- a/.github/workflows/enos-run.yml +++ /dev/null @@ -1,122 +0,0 @@ ---- -name: enos - -on: - # Only trigger this working using workflow_call. 
It assumes that secrets are - # being inherited from the caller. - workflow_call: - inputs: - artifact-name: - required: true - type: string - -env: - PKG_NAME: vault - ARTIFACT_NAME: ${{ inputs.artifact-name }} - -jobs: - # Read Enos scenario matrix file based on artifact-name input to test - read-enos-matrix: - runs-on: ubuntu-latest - outputs: - enos-scenarios: ${{ steps.enos-matrix.outputs.matrix }} - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Create Enos scenario matrix - id: enos-matrix - run: | - [[ ${{ env.ARTIFACT_NAME }} == *"ent"* ]] && scenarioFile=$(cat ./.github/enos-run-matrices/ent.json |jq -c .) || scenarioFile=$(cat ./.github/enos-run-matrices/oss.json |jq -c .) - echo "::set-output name=matrix::$scenarioFile" - # Run Integration tests on Enos scenario matrix - enos: - name: Integration - needs: read-enos-matrix - strategy: - fail-fast: false # don't fail as that can skip required cleanup steps for jobs - matrix: ${{ fromJson(needs.read-enos-matrix.outputs.enos-scenarios) }} - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Set up Terraform - uses: hashicorp/setup-terraform@v2 - with: - # the Terraform wrapper will break Terraform execution in Enos because - # it changes the output to text when we expect it to be JSON. 
- terraform_wrapper: false - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ matrix.aws_region }} - role-to-assume: ${{ secrets.AWS_ROLE_ARN }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - - name: Set up Enos - uses: hashicorp/action-setup-enos@v1 - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: Set up AWS SSH private key - run: | - mkdir -p ./enos/support - echo "${{ secrets.ENOS_CI_SSH_KEY }}" > ./enos/support/private_key.pem - chmod 600 ./enos/support/private_key.pem - - name: Download Linux AMD64 Vault bundle - id: download - uses: actions/download-artifact@v3 - with: - name: ${{ inputs.artifact-name }} - path: ./enos/support/downloads - - name: Prepare for scenario execution - run: | - unzip ${{steps.download.outputs.download-path}}/*.zip -d enos/support - mv ${{steps.download.outputs.download-path}}/*.zip enos/support/vault.zip - mkdir -p enos/support/terraform-plugin-cache - [[ ${{ env.ARTIFACT_NAME }} == *"ent"* ]] && echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - - name: Run Enos scenario - id: run - # Continue once and retry to handle occasional blips when creating - # infrastructure. 
- continue-on-error: true - env: - ENOS_VAR_aws_region: ${{ matrix.aws_region }} - ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key - ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache - ENOS_VAR_vault_bundle_path: ./support/vault.zip - run: | - enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Retry Enos scenario - id: run_retry - if: steps.run.outcome == 'failure' - env: - ENOS_VAR_aws_region: ${{ matrix.aws_region }} - ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key - ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache - ENOS_VAR_vault_bundle_path: ./support/vault.zip - run: | - enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Destroy Enos scenario - if: ${{ always() }} - env: - ENOS_VAR_aws_region: ${{ matrix.aws_region }} - ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key - ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache - ENOS_VAR_vault_bundle_path: ./support/vault.zip - run: | - enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Cleanup Enos runtime directories - if: ${{ always() }} - run: | - rm -rf /tmp/enos* - rm -rf ./enos/support - rm -rf ./enos/.enos diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml deleted file mode 100644 index 4e03b9761ba41..0000000000000 --- a/.github/workflows/oss.yml +++ /dev/null @@ -1,128 +0,0 @@ -# Open Source Community Workflows - -name: Project triage -on: - pull_request: - types: [opened, reopened] - # Runs on PRs to main - branches: - - main - - issues: - types: [opened, reopened] - -jobs: - add-to-projects: - 
# exclude internal PRs - if: github.event.pull_request.head.repo.owner.login != 'hashicorp' && ((github.event.action == 'reopened') || (github.event.action == 'opened')) - name: Add issue or PR to projects - runs-on: ubuntu-latest - steps: - - if: github.event.pull_request != null - uses: actions/checkout@v3 - - if: github.event.pull_request != null - uses: dorny/paths-filter@v2 - id: changes - with: - # derived from CODEOWNERS - filters: | - cryptosec: - - 'builtin/logical/pki/**' - - 'builtin/logical/ssh/**' - - 'builtin/logical/totp/**' - - 'builtin/logical/transit/**' - ecosystem: - - 'builtin/credential/aws/**' - - 'builtin/credential/github/**' - - 'builtin/credential/ldap/**' - - 'builtin/credential/okta/**' - - 'builtin/logical/aws/**' - - 'builtin/logical/cassandra/**' - - 'builtin/logical/consul/**' - - 'builtin/logical/database/**' - - 'builtin/logical/mongodb/**' - - 'builtin/logical/mssql/**' - - 'builtin/logical/mysql/**' - - 'builtin/logical/nomad/**' - - 'builtin/logical/postgresql/**' - - 'builtin/logical/rabbitmq/**' - - 'command/agent/**' - - 'plugins/**' - - 'vault/plugin_catalog.go' - - 'ui/app/components/auth-jwt.js' - - 'ui/app/routes/vault/cluster/oidc-*.js' - devex: - - 'api/**' - - 'command/**' - ui: - - 'ui/**' - - - name: "Default to core board" - run: echo "PROJECT=170" >> $GITHUB_ENV - - if: github.event.pull_request != null && steps.changes.outputs.cryptosec == 'true' - run: echo "PROJECT=172" >> $GITHUB_ENV - - if: github.event.pull_request != null && steps.changes.outputs.ecosystem == 'true' - run: echo "PROJECT=169" >> $GITHUB_ENV - - if: github.event.pull_request != null && steps.changes.outputs.devex == 'true' - run: echo "PROJECT=176" >> $GITHUB_ENV - - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' - run: echo "PROJECT=171" >> $GITHUB_ENV - - - uses: actions/add-to-project@v0.3.0 - with: - project-url: https://github.com/orgs/hashicorp/projects/${{ env.PROJECT }} - github-token: ${{ 
secrets.TRIAGE_GITHUB_TOKEN }} - - # example of something more complicated: deleting an issue or PR automatically (though this is done in the project workflows already) - # we have to use the GraphQL API for anything involving projects. - # - # get-project: - # name: Get project data - # runs-on: ubuntu-latest - # if: github.event.action == 'closed' || github.event.action == 'deleted' - # outputs: - # project_id: ${{ steps.get-project.outputs.project_id }} - # steps: - # - id: get-project - # name: Get project data - # env: - # GITHUB_TOKEN: ${{ secrets.TRIAGE_GITHUB_TOKEN }} - # ORGANIZATION: hashicorp - # PROJECT_NUMBER: 169 - # run: | - # gh api graphql -f query=' - # query($org: String!, $number: Int!) { - # organization(login: $org){ - # projectV2(number: $number) { - # id - # } - # } - # }' -f org=$ORGANIZATION -F number=$PROJECT_NUMBER > project_data.json - # echo "::set-output name=project_id::$(jq '.data.organization.projectV2.id' project_data.json)" - - # delete-from-project: - # name: Remove issue or PR from project - # needs: [get-project] - # if: github.event.action == 'closed' || github.event.action == 'deleted' - # runs-on: ubuntu-latest - # steps: - # - name: Remove issue or PR - # env: - # GITHUB_TOKEN: ${{ secrets.TRIAGE_GITHUB_TOKEN }} - # run: | - # PROJECT_ID=${{ needs.get-project.outputs.project_id }} - # item_id=${{ github.event.issue.node_id }} - # if [ -z "$item_id" ]; then - # item_id=${{ github.event.pull_request.node_id }} - # fi - # gh api graphql -f query=' - # mutation($project_id: ID!, $item_id: ID!) 
{ - # deleteProjectV2Item( - # input: { - # projectId: $project_id - # itemId: $item_id - # } - # ) { - # deletedItemId - # } - # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true \ No newline at end of file diff --git a/.gitignore b/.gitignore index f5793c7356969..75b7fec103349 100644 --- a/.gitignore +++ b/.gitignore @@ -56,11 +56,7 @@ Vagrantfile !.release/linux/package/etc/vault.d/vault.hcl !command/agent/config/test-fixtures/*.hcl !command/server/test-fixtures/**/*.hcl -!enos/*.hcl -# Enos -enos/.enos -enos/support .DS_Store .idea diff --git a/.release/ci.hcl b/.release/ci.hcl index a103d5031940a..ffff06ae9b83f 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -3,7 +3,7 @@ schema = "1" project "vault" { team = "vault" slack { - notification_channel = "C03RXFX5M4L" // #feed-vault-releases + notification_channel = "CRF6FFKEW" // #vault-releases } github { organization = "hashicorp" diff --git a/CHANGELOG.md b/CHANGELOG.md index 037d16338452d..5dd2225a1233d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,303 +1,34 @@ -## 1.12.0 -### Unreleased - -CHANGES: - -* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] -* core: Bump Go version to 1.18.5. -* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] -* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades -will not be allowed if the license termination time is before the build date of the binary. 
-* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] - -FEATURES: - -* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] -* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] -* secrets/pki: Add an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for -a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] -* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] - -IMPROVEMENTS: - -* activity (enterprise): Added new clients unit tests to test accuracy of estimates -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] -* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] -* agent: Send notifications to systemd on start and stop. 
[[GH-9802](https://github.com/hashicorp/vault/pull/9802)] -* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] -* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] -* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] -* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] -* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] -* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the -Kerberos config in Vault. This removes any instance names found in the keytab -service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)] -* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] -* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] -* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. 
[[GH-16421](https://github.com/hashicorp/vault/pull/16421)] -* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command -* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported. -* core (enterprise): Add custom metadata support for namespaces -* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] -* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] -* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] -* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas -* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role -* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] -* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] -* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] -* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] -* docs: Clarify the behaviour of local mounts in the context of DR 
replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)] -* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] -* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] -* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] -* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)] -* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] -* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] -* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] -* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)] -* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] -* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). 
[[GH-15742](https://github.com/hashicorp/vault/pull/15742)] -* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] -* secrets/kubernetes: Add allowed_kubernetes_namespace_selector to allow selecting Kubernetes namespaces with a label selector when configuring roles. [[GH-16240](https://github.com/hashicorp/vault/pull/16240)] -* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] -* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] -* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] -* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] -* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)] -* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] -* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] -* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] -* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. 
[[GH-16874](https://github.com/hashicorp/vault/pull/16874)] -* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)] -* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] -* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] -* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] -* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] -* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)] -* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] -* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] -* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] -* website/docs: API generate-recovery-token documentation. 
[[GH-16213](https://github.com/hashicorp/vault/pull/16213)] -* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] -* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] - -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)] -* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts. 
-* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases -* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] -* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. 
[[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* replication (enterprise): Fix data race in saveCheckpoint. -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] -* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] -* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] -* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] - -## 1.11.3 -### August 31, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.13. - -IMPROVEMENTS: - -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the -Kerberos config in Vault. This removes any instance names found in the keytab -service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] - -BUG FIXES: - -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. 
[[GH-16523](https://github.com/hashicorp/vault/pull/16523)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. 
[[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.11.2 -### August 2, 2022 - -IMPROVEMENTS: - -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] - -BUG FIXES: - -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] - -## 1.11.1 -### July 21, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.12. 
- -IMPROVEMENTS: - -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* kmip (enterprise): Return SecretData as supported Object Type. -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
-* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] - -SECURITY: - -* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - ## 1.11.0 -### June 20, 2022 +### Unreleased CHANGES: * auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] -* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] * auth: Remove support for legacy MFA (https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] * core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] -* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). 
This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] +* core: Bump Go version to 1.17.9. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] * licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). * replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. -* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] -* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] -* secrets/pki: A new aliased api path (/pki/issuer/:issuer_ref/sign-self-issued) -providing the same functionality as the existing API(/pki/root/sign-self-issued) -does not require sudo capabilities but the latter still requires it in an -effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] -* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)] -* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead -of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] -* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) -and signing APIs will now include the root CA certificate if the mount is -aware of it. 
[[GH-15155](https://github.com/hashicorp/vault/pull/15155)] -* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers -and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] -* secrets/pki: existing Generate Root (pki/root/generate/:type), -Set Signed Intermediate (/pki/intermediate/set-signed) APIs will -add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] -* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain -response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] * ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] FEATURES: -* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. -* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] -* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] * **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows import, generation and configuration of any number of keys and/or issuers within a PKI mount, providing operators the ability to rotate certificates in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] -* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. 
[[GH-14899](https://github.com/hashicorp/vault/pull/14899)] -* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] -* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] +* api/command: Global -output-policy flag to determine minimum required policy HCL for a given operation [[GH-14899](https://github.com/hashicorp/vault/pull/14899)] * nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] * storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)] IMPROVEMENTS: -* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] * agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)] -* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] +* agent: Update consult-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] * agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] * api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] * api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] * api: Added MFALogin() for handling MFA flow when using login helpers. 
[[GH-14900](https://github.com/hashicorp/vault/pull/14900)] @@ -305,110 +36,49 @@ IMPROVEMENTS: being what the endpoints were expecting, or if the parameters supplied get replaced by the values in the endpoint's path itself, warnings will be added to the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] -* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] * api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] -* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] * api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] -* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] -* audit: Add a policy_results block into the audit log that contains the set of -policies that granted this request access. 
[[GH-15457](https://github.com/hashicorp/vault/pull/15457)] -* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] -* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] -* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] -* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. [[GH-15593](https://github.com/hashicorp/vault/pull/15593)] * auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] -* auth/okta: Add support for performing [the number -challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) -during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] * auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] * cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] * cli/vault: warn when policy name contains upper-case letter 
[[GH-14670](https://github.com/hashicorp/vault/pull/14670)] * cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] * cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] -* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] * core (enterprise): Include `termination_time` in `sys/license/status` response * core (enterprise): Include termination time in `license inspect` command output +* core : check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] * core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] * core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] -* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] -* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. 
-* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] * core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)] -* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] * core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] * core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. [[GH-14957](https://github.com/hashicorp/vault/pull/14957)] * core: Upgrade github.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] -* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. -* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] * sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] * secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] * secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] -* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] -* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. 
[[GH-14178](https://github.com/hashicorp/vault/pull/14178)] -* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] -* secrets/pki: Allow operators to control the issuing certificate behavior when -the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] -* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] -* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] -* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] -* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] -* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)] * secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] -* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] -* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] * sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer * storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] -* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] * ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] -* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] * ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] * ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] -* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] * website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] -DEPRECATIONS: - -* docs: Document removal of X.509 certificates with signatures who use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] -* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] -* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] - BUG FIXES: * Fixed panic when adding or modifying a Duo MFA Method in Enterprise * agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] * api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] * api: Fixes bug where OutputCurlString field was unintentionally being copied over 
during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] * api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] * auth/approle: Add maximum length for input values that result in SHA56 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] * auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] * auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] * cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" 
error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] * cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] * cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] -* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] * core (enterprise): Allow local alias create RPCs to persist alias metadata -* core (enterprise): Fix overcounting of lease count quota usage at startup. * core (enterprise): Fix some races in merkle index flushing code found in testing -* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. * core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] * core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number * core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] @@ -417,161 +87,33 @@ and the username is now used without making superfluous LDAP searches. [[GH-1552 * core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] * core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] * core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. 
[[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] * core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] * core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] * core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] * core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] * core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] * core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. 
[[GH-15579](https://github.com/hashicorp/vault/pull/15579)] -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] * raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] * replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] * sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] * sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] * sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] * secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] -* secrets/pki: CRLs on performance secondary clusters are now automatically -rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] * secrets/pki: Fix handling of "any" key type with default zero signature bits value. 
[[GH-14875](https://github.com/hashicorp/vault/pull/14875)] * secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] -* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] * ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] * ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] * ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] * ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] * ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] * ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] -* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] * ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] * ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] -## 1.10.6 -### August 31, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.13. - -IMPROVEMENTS: - -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] - -BUG FIXES: - -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. 
[[GH-16524](https://github.com/hashicorp/vault/pull/16524)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.10.5 -### July 21, 2022 - -CHANGES: - -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: Bump Go version to 1.17.12. - -IMPROVEMENTS: - -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] - -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - -## 1.10.4 -### June 10, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.11. 
[[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] - -IMPROVEMENTS: - -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. [[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] - -BUG FIXES: - -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. 
This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
-* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] - ## 1.10.3 ### May 11, 2022 -SECURITY: -* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. - BUG FIXES: * auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] @@ -852,78 +394,6 @@ operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-1 * ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] * ui: trigger token renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] -## 1.9.9 -### August 31, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.13. - -BUG FIXES: - -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. 
[[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.9.8 -### July 21, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.12. - -IMPROVEMENTS: - -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] - -BUG FIXES: - -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
-* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - -## 1.9.7 -### June 10, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] - -IMPROVEMENTS: - -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] - -BUG FIXES: - -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. -* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] - ## 1.9.6 ### April 29, 2022 @@ -984,10 +454,6 @@ autosnapshot save error. ## 1.9.4 ### March 3, 2022 -SECURITY: -* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. - CHANGES: * secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft @@ -1296,18 +762,6 @@ of dirty pages in the merkle tree at time of checkpoint creation. 
[[GH-2093](htt * ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)] * ui: updating database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)] -## 1.8.12 -### June 10, 2022 - -BUG FIXES: - -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. - ## 1.8.11 ### April 29, 2022 @@ -1358,9 +812,6 @@ autosnapshot save error. ## 1.8.9 ### March 3, 2022 -* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. 
- IMPROVEMENTS: * secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] @@ -1696,10 +1147,6 @@ BUG FIXES: ## 1.7.10 ### March 3, 2022 -SECURITY: - -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. - BUG FIXES: * database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] diff --git a/CODEOWNERS b/CODEOWNERS index 1c120e00fa11e..0f0191af3715a 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,6 +22,7 @@ /builtin/logical/postgresql/ @hashicorp/vault-ecosystem /builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem +/command/agent/ @hashicorp/vault-ecosystem /plugins/ @hashicorp/vault-ecosystem /vault/plugin_catalog.go @hashicorp/vault-ecosystem @@ -37,7 +38,3 @@ # so stewards of the backend code are added below for notification. /ui/app/components/auth-jwt.js @austingebauer /ui/app/routes/vault/cluster/oidc-*.js @austingebauer - -# Release config; service account is required for automation tooling. 
-/.release/ @hashicorp/release-engineering @hashicorp/github-secure-vault-core -/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-secure-vault-core diff --git a/Makefile b/Makefile index f084e3bd5c65e..2e6408dad2271 100644 --- a/Makefile +++ b/Makefile @@ -8,15 +8,14 @@ EXTENDED_TEST_TIMEOUT=60m INTEG_TEST_TIMEOUT=120m VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr EXTERNAL_TOOLS_CI=\ + github.com/mitchellh/gox \ golang.org/x/tools/cmd/goimports EXTERNAL_TOOLS=\ github.com/client9/misspell/cmd/misspell GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) -SED?=$(shell command -v gsed || command -v sed) -GO_VERSION_MIN=1.19.1 -PROTOC_VERSION_MIN=3.21.5 +GO_VERSION_MIN=1.17.13 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) @@ -152,11 +151,23 @@ test-ember-enos: install-ui-dependencies @echo "--> Running ember tests with a real backend" @cd ui && yarn run test:enos +ember-ci-test: # Deprecated, to be removed soon. 
+ @echo "ember-ci-test is deprecated in favour of test-ui-browserstack" + @exit 1 + check-vault-in-path: @VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \ [ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \ printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)" +check-browserstack-creds: + @[ -n "$$BROWSERSTACK_ACCESS_KEY" ] || { echo "BROWSERSTACK_ACCESS_KEY not set"; exit 1; } + @[ -n "$$BROWSERSTACK_USERNAME" ] || { echo "BROWSERSTACK_USERNAME not set"; exit 1; } + +test-ui-browserstack: check-vault-in-path check-browserstack-creds install-ui-dependencies + @echo "--> Running ember tests in Browserstack" + @cd ui && yarn run test:browserstack + ember-dist: install-ui-dependencies @cd ui && npm rebuild node-sass @echo "--> Building Ember application" @@ -172,7 +183,6 @@ static-dist: ember-dist static-dist-dev: ember-dist-dev proto: bootstrap - @sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'" protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/activity_log.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto @@ -186,13 +196,12 @@ proto: bootstrap protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative vault/hcp_link/proto/node_status/*.proto # No additional sed expressions should be added to this list. Going forward # we should just use the variable names choosen by protobuf. These are left # here for backwards compatability, namely for SDK compilation. - $(SED) -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go - $(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go + sed -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go + sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go # This will inject the sentinel struct tags as decorated in the proto files. protoc-go-inject-tag -input=./helper/identity/types.pb.go @@ -205,10 +214,10 @@ fmtcheck: fmt: find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w -semgrep: +semgrep: semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep . -semgrep-ci: +semgrep-ci: semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci . 
assetcheck: @@ -250,7 +259,7 @@ ci-config: ci-verify: @$(MAKE) -C .circleci ci-verify -.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci +.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack packages build build-ci semgrep semgrep-ci .NOTPARALLEL: ember-dist ember-dist-dev diff --git a/README.md b/README.md index abda05b9c64ea..39b12e8aa0290 100644 --- a/README.md +++ b/README.md @@ -40,11 +40,11 @@ The key features of Vault are: having to design their own encryption methods. * **Leasing and Renewal**: All secrets in Vault have a _lease_ associated - with them. At the end of the lease, Vault will automatically revoke that + with it. At the end of the lease, Vault will automatically revoke that secret. Clients are able to renew leases via built-in renew APIs. * **Revocation**: Vault has built-in support for secret revocation. Vault - can revoke not only single secrets, but a tree of secrets, for example, + can revoke not only single secrets, but a tree of secrets, for example all secrets read by a specific user, or all secrets of a particular type. Revocation assists in key rolling as well as locking down systems in the case of an intrusion. 
@@ -71,11 +71,11 @@ Developing Vault If you wish to work on Vault itself or any of its built-in systems, you'll first need [Go](https://www.golang.org) installed on your machine. Go version -1.19.1+ is *required*. +1.17.13+ is *required*. For local dev first make sure Go is properly installed, including setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH). Ensure that `$GOPATH/bin` is in -your path as some distributions bundle the old version of build tools. Next, clone this +your path as some distributions bundle old version of build tools. Next, clone this repository. Vault uses [Go Modules](https://github.com/golang/go/wiki/Modules), so it is recommended that you clone the repository ***outside*** of the GOPATH. You can then download any required build tools by bootstrapping your environment: diff --git a/api/auth/approle/approle_test.go b/api/auth/approle/approle_test.go index f2628c695cc27..9367e54bb79e6 100644 --- a/api/auth/approle/approle_test.go +++ b/api/auth/approle/approle_test.go @@ -16,8 +16,7 @@ import ( // testHTTPServer creates a test HTTP server that handles requests until // the listener returned is closed. 
func testHTTPServer( - t *testing.T, handler http.Handler, -) (*api.Config, net.Listener) { + t *testing.T, handler http.Handler) (*api.Config, net.Listener) { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("err: %s", err) diff --git a/api/auth/approle/go.mod b/api/auth/approle/go.mod index b3d871b887ad6..4313984b23069 100644 --- a/api/auth/approle/go.mod +++ b/api/auth/approle/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/approle go 1.16 -require github.com/hashicorp/vault/api v1.8.0 +require github.com/hashicorp/vault/api v1.5.0 diff --git a/api/auth/approle/go.sum b/api/auth/approle/go.sum index c7835a3bf723f..89bdb5ce7a2af 100644 --- a/api/auth/approle/go.sum +++ b/api/auth/approle/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -113,13 +113,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock 
v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -133,10 +131,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod 
h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -175,8 +173,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/aws/go.mod b/api/auth/aws/go.mod index 6ccdb0f4e1dde..75e5343a3ba31 100644 --- a/api/auth/aws/go.mod +++ b/api/auth/aws/go.mod @@ -7,5 +7,5 @@ require ( 
github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/vault/api v1.8.0 + github.com/hashicorp/vault/api v1.5.0 ) diff --git a/api/auth/aws/go.sum b/api/auth/aws/go.sum index 0d29275b814c1..0c57287ff115d 100644 --- a/api/auth/aws/go.sum +++ b/api/auth/aws/go.sum @@ -102,7 +102,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -118,13 +118,11 @@ github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= 
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -138,10 +136,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod 
h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -183,8 +181,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/azure/go.mod b/api/auth/azure/go.mod index becdcd90e12a0..89283886b2b83 100644 --- a/api/auth/azure/go.mod +++ b/api/auth/azure/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/azure go 1.16 -require github.com/hashicorp/vault/api v1.8.0 +require github.com/hashicorp/vault/api v1.5.0 diff --git a/api/auth/azure/go.sum b/api/auth/azure/go.sum index c7835a3bf723f..89bdb5ce7a2af 100644 --- a/api/auth/azure/go.sum +++ b/api/auth/azure/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -113,13 +113,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod 
h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -133,10 +131,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -175,8 +173,8 @@ 
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/gcp/go.mod b/api/auth/gcp/go.mod index cb122ae9cfe30..a7d8637ff0180 100644 --- a/api/auth/gcp/go.mod +++ b/api/auth/gcp/go.mod @@ -4,6 +4,6 @@ go 1.16 require ( cloud.google.com/go v0.97.0 - github.com/hashicorp/vault/api v1.8.0 + github.com/hashicorp/vault/api v1.5.0 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 ) diff --git a/api/auth/gcp/go.sum b/api/auth/gcp/go.sum index a9fda24c58efb..60c2540e96636 100644 --- a/api/auth/gcp/go.sum +++ b/api/auth/gcp/go.sum @@ -203,7 +203,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
-github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -217,13 +217,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= 
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -238,10 +236,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -285,8 +283,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/kubernetes/go.mod b/api/auth/kubernetes/go.mod index 0faeebfcc91aa..e529831e6fba1 100644 --- a/api/auth/kubernetes/go.mod +++ b/api/auth/kubernetes/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/kubernetes go 1.16 -require github.com/hashicorp/vault/api v1.8.0 +require github.com/hashicorp/vault/api v1.5.0 diff --git a/api/auth/kubernetes/go.sum b/api/auth/kubernetes/go.sum index c7835a3bf723f..89bdb5ce7a2af 100644 --- a/api/auth/kubernetes/go.sum +++ b/api/auth/kubernetes/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= 
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -113,13 +113,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -133,10 +131,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru 
v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -175,8 +173,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/ldap/go.mod b/api/auth/ldap/go.mod index 15b683c2696ab..3b04391541d7b 100644 --- a/api/auth/ldap/go.mod +++ b/api/auth/ldap/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/ldap go 1.16 -require github.com/hashicorp/vault/api v1.8.0 +require github.com/hashicorp/vault/api v1.5.0 diff --git a/api/auth/ldap/go.sum b/api/auth/ldap/go.sum index c7835a3bf723f..89bdb5ce7a2af 100644 --- a/api/auth/ldap/go.sum +++ b/api/auth/ldap/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -113,13 +113,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 
h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -133,10 +131,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 
h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -175,8 +173,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/ldap/ldap_test.go b/api/auth/ldap/ldap_test.go index 
8633c4dfac116..cd656febb612d 100644 --- a/api/auth/ldap/ldap_test.go +++ b/api/auth/ldap/ldap_test.go @@ -16,8 +16,7 @@ import ( // testHTTPServer creates a test HTTP server that handles requests until // the listener returned is closed. func testHTTPServer( - t *testing.T, handler http.Handler, -) (*api.Config, net.Listener) { + t *testing.T, handler http.Handler) (*api.Config, net.Listener) { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("err: %s", err) diff --git a/api/auth/userpass/go.mod b/api/auth/userpass/go.mod index 5f907553bd35f..dcbbdffa58149 100644 --- a/api/auth/userpass/go.mod +++ b/api/auth/userpass/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/userpass go 1.16 -require github.com/hashicorp/vault/api v1.8.0 +require github.com/hashicorp/vault/api v1.5.0 diff --git a/api/auth/userpass/go.sum b/api/auth/userpass/go.sum index c7835a3bf723f..89bdb5ce7a2af 100644 --- a/api/auth/userpass/go.sum +++ b/api/auth/userpass/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -113,13 +113,11 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 
github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -133,10 +131,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 
h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= +github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -175,8 +173,8 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/api/auth/userpass/userpass_test.go b/api/auth/userpass/userpass_test.go index 0728117a1e8c4..eb62499aa70a1 100644 --- a/api/auth/userpass/userpass_test.go +++ b/api/auth/userpass/userpass_test.go @@ -16,8 +16,7 @@ import ( // testHTTPServer creates a test HTTP server that handles requests until // the listener returned is closed. func testHTTPServer( - t *testing.T, handler http.Handler, -) (*api.Config, net.Listener) { + t *testing.T, handler http.Handler) (*api.Config, net.Listener) { ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("err: %s", err) diff --git a/api/client.go b/api/client.go index 7c17981059fb7..c07f3b124a3c7 100644 --- a/api/client.go +++ b/api/client.go @@ -347,6 +347,8 @@ func (c *Config) ReadEnvironment() error { } if v := os.Getenv(EnvVaultAgentAddr); v != "" { envAgentAddress = v + } else if v := os.Getenv(EnvVaultAgentAddress); v != "" { + envAgentAddress = v } if v := os.Getenv(EnvVaultMaxRetries); v != "" { maxRetries, err := strconv.ParseUint(v, 10, 32) @@ -390,6 +392,12 @@ func (c *Config) ReadEnvironment() error { if err != nil { return fmt.Errorf("could not parse VAULT_SKIP_VERIFY") } + } else if v := os.Getenv(EnvVaultInsecure); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse VAULT_INSECURE") + } } if v := os.Getenv(EnvVaultSRVLookup); v != "" { var err error @@ -462,51 +470,6 @@ func (c *Config) ReadEnvironment() error { return nil } -// ParseAddress transforms the provided address into a url.URL and handles -// the case of Unix domain sockets by setting the DialContext in the -// configuration's HttpClient.Transport. This function must be called with -// c.modifyLock held for write access. 
-func (c *Config) ParseAddress(address string) (*url.URL, error) { - u, err := url.Parse(address) - if err != nil { - return nil, err - } - - c.Address = address - - if strings.HasPrefix(address, "unix://") { - // When the address begins with unix://, always change the transport's - // DialContext (to match previous behaviour) - socket := strings.TrimPrefix(address, "unix://") - - if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { - transport.DialContext = func(context.Context, string, string) (net.Conn, error) { - return net.Dial("unix", socket) - } - - // Since the address points to a unix domain socket, the scheme in the - // *URL would be set to `unix`. The *URL in the client is expected to - // be pointing to the protocol used in the application layer and not to - // the transport layer. Hence, setting the fields accordingly. - u.Scheme = "http" - u.Host = socket - u.Path = "" - } else { - return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") - } - } else if strings.HasPrefix(c.Address, "unix://") { - // When the address being set does not begin with unix:// but the previous - // address in the Config did, change the transport's DialContext back to - // use the default configuration that cleanhttp uses. 
- - if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { - transport.DialContext = cleanhttp.DefaultPooledTransport().DialContext - } - } - - return u, nil -} - func parseRateLimit(val string) (rate float64, burst int, err error) { _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) if err != nil { @@ -579,11 +542,27 @@ func NewClient(c *Config) (*Client, error) { address = c.AgentAddress } - u, err := c.ParseAddress(address) + u, err := url.Parse(address) if err != nil { return nil, err } + if strings.HasPrefix(address, "unix://") { + socket := strings.TrimPrefix(address, "unix://") + transport := c.HttpClient.Transport.(*http.Transport) + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. 
+ u.Scheme = "http" + u.Host = socket + u.Path = "" + } + client := &Client{ addr: u, config: c, @@ -642,11 +621,14 @@ func (c *Client) SetAddress(addr string) error { c.modifyLock.Lock() defer c.modifyLock.Unlock() - parsedAddr, err := c.config.ParseAddress(addr) + parsedAddr, err := url.Parse(addr) if err != nil { return errwrap.Wrapf("failed to set address: {{err}}", err) } + c.config.modifyLock.Lock() + c.config.Address = addr + c.config.modifyLock.Unlock() c.addr = parsedAddr return nil } diff --git a/api/client_test.go b/api/client_test.go index 2305d42fe7870..74f1b9354e361 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -69,63 +69,17 @@ func TestClientNilConfig(t *testing.T) { } } -func TestClientDefaultHttpClient_unixSocket(t *testing.T) { - os.Setenv("VAULT_AGENT_ADDR", "unix:///var/run/vault.sock") - defer os.Setenv("VAULT_AGENT_ADDR", "") - - client, err := NewClient(nil) - if err != nil { - t.Fatal(err) - } - if client == nil { - t.Fatal("expected a non-nil client") - } - if client.addr.Scheme != "http" { - t.Fatalf("bad: %s", client.addr.Scheme) - } - if client.addr.Host != "/var/run/vault.sock" { - t.Fatalf("bad: %s", client.addr.Host) - } -} - func TestClientSetAddress(t *testing.T) { client, err := NewClient(nil) if err != nil { t.Fatal(err) } - // Start with TCP address using HTTP - if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { - t.Fatal(err) - } - if client.addr.Host != "172.168.2.1:8300" { - t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) - } - // Test switching to Unix Socket address from TCP address - if err := client.SetAddress("unix:///var/run/vault.sock"); err != nil { - t.Fatal(err) - } - if client.addr.Scheme != "http" { - t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) - } - if client.addr.Host != "/var/run/vault.sock" { - t.Fatalf("bad: expected: '/var/run/vault.sock' actual: %q", client.addr.Host) - } - if client.addr.Path != "" { - t.Fatalf("bad: expected '' 
actual: %q", client.addr.Path) - } - if client.config.HttpClient.Transport.(*http.Transport).DialContext == nil { - t.Fatal("bad: expected DialContext to not be nil") - } - // Test switching to TCP address from Unix Socket address if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { t.Fatal(err) } if client.addr.Host != "172.168.2.1:8300" { t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) } - if client.addr.Scheme != "http" { - t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) - } } func TestClientToken(t *testing.T) { @@ -299,7 +253,7 @@ func TestDefaulRetryPolicy(t *testing.T) { t.Fatalf("expected to retry request: '%t', but actual result was: '%t'", test.expect, retry) } if err != test.expectErr { - t.Fatalf("expected error from retry policy: %q, but actual result was: %q", err, test.expectErr) + t.Fatalf("expected error from retry policy: '%s', but actual result was: '%s'", err, test.expectErr) } }) } @@ -472,20 +426,6 @@ func TestClientNonTransportRoundTripper(t *testing.T) { } } -func TestClientNonTransportRoundTripperUnixAddress(t *testing.T) { - client := &http.Client{ - Transport: roundTripperFunc(http.DefaultTransport.RoundTrip), - } - - _, err := NewClient(&Config{ - HttpClient: client, - Address: "unix:///var/run/vault.sock", - }) - if err == nil { - t.Fatal("bad: expected error got nil") - } -} - func TestClone(t *testing.T) { type fields struct{} tests := []struct { @@ -1219,7 +1159,7 @@ func TestClientWithNamespace(t *testing.T) { t.Fatalf("err: %s", err) } if ns != ogNS { - t.Fatalf("Expected namespace: %q, got %q", ogNS, ns) + t.Fatalf("Expected namespace: \"%s\", got \"%s\"", ogNS, ns) } // make a call with a temporary namespace @@ -1231,7 +1171,7 @@ func TestClientWithNamespace(t *testing.T) { t.Fatalf("err: %s", err) } if ns != newNS { - t.Fatalf("Expected new namespace: %q, got %q", newNS, ns) + t.Fatalf("Expected new namespace: \"%s\", got \"%s\"", newNS, ns) } // ensure client has not 
been modified _, err = client.rawRequestWithContext( @@ -1241,7 +1181,7 @@ func TestClientWithNamespace(t *testing.T) { t.Fatalf("err: %s", err) } if ns != ogNS { - t.Fatalf("Expected original namespace: %q, got %q", ogNS, ns) + t.Fatalf("Expected original namespace: \"%s\", got \"%s\"", ogNS, ns) } // make call with empty ns @@ -1252,12 +1192,12 @@ func TestClientWithNamespace(t *testing.T) { t.Fatalf("err: %s", err) } if ns != "" { - t.Fatalf("Expected no namespace, got %q", ns) + t.Fatalf("Expected no namespace, got \"%s\"", ns) } // ensure client has not been modified if client.Namespace() != ogNS { - t.Fatalf("Expected original namespace: %q, got %q", ogNS, client.Namespace()) + t.Fatalf("Expected original namespace: \"%s\", got \"%s\"", ogNS, client.Namespace()) } } @@ -1344,25 +1284,3 @@ func TestVaultProxy(t *testing.T) { }) } } - -func TestParseAddressWithUnixSocket(t *testing.T) { - address := "unix:///var/run/vault.sock" - config := DefaultConfig() - - u, err := config.ParseAddress(address) - if err != nil { - t.Fatal("Error not expected") - } - if u.Scheme != "http" { - t.Fatal("Scheme not changed to http") - } - if u.Host != "/var/run/vault.sock" { - t.Fatal("Host not changed to socket name") - } - if u.Path != "" { - t.Fatal("Path expected to be blank") - } - if config.HttpClient.Transport.(*http.Transport).DialContext == nil { - t.Fatal("DialContext function not set in config.HttpClient.Transport") - } -} diff --git a/api/go.mod b/api/go.mod index 93164863e6b1f..fe7fa41d73eac 100644 --- a/api/go.mod +++ b/api/go.mod @@ -15,7 +15,7 @@ require ( github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/vault/sdk v0.6.0 + github.com/hashicorp/vault/sdk v0.5.1 github.com/mitchellh/mapstructure v1.5.0 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 diff --git a/api/go.sum b/api/go.sum index 
823feb55d0691..269f0d4826a00 100644 --- a/api/go.sum +++ b/api/go.sum @@ -99,7 +99,7 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= diff --git a/api/kv.go b/api/kv.go index 37699df266f9f..a334c8e218e57 100644 --- a/api/kv.go +++ b/api/kv.go @@ -1,11 +1,5 @@ package api -import "errors" - -// ErrSecretNotFound is returned by KVv1 and KVv2 wrappers to indicate that the -// secret is missing at the given location. -var ErrSecretNotFound = errors.New("secret not found") - // A KVSecret is a key-value secret returned by Vault's KV secrets engine, // and is the most basic type of secret stored in Vault. 
// @@ -36,8 +30,8 @@ type KVSecret struct { // // Learn more about the KV secrets engine here: // https://www.vaultproject.io/docs/secrets/kv -func (c *Client) KVv1(mountPath string) *KVv1 { - return &KVv1{c: c, mountPath: mountPath} +func (c *Client) KVv1(mountPath string) *kvv1 { + return &kvv1{c: c, mountPath: mountPath} } // KVv2 is used to return a client for reads and writes against @@ -51,6 +45,6 @@ func (c *Client) KVv1(mountPath string) *KVv1 { // // Learn more about the KV secrets engine here: // https://www.vaultproject.io/docs/secrets/kv -func (c *Client) KVv2(mountPath string) *KVv2 { - return &KVv2{c: c, mountPath: mountPath} +func (c *Client) KVv2(mountPath string) *kvv2 { + return &kvv2{c: c, mountPath: mountPath} } diff --git a/api/kv_v1.go b/api/kv_v1.go index 22ba992384b79..1b0428dee977c 100644 --- a/api/kv_v1.go +++ b/api/kv_v1.go @@ -5,13 +5,13 @@ import ( "fmt" ) -type KVv1 struct { +type kvv1 struct { c *Client mountPath string } // Get returns a secret from the KV v1 secrets engine. -func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { +func (kv *kvv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) @@ -19,7 +19,7 @@ func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) } if secret == nil { - return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + return nil, fmt.Errorf("no secret found at %s", pathToRead) } return &KVSecret{ @@ -33,7 +33,7 @@ func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { // KV v1 secrets engine. // // If the secret already exists, it will be overwritten. 
-func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { +func (kv *kvv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data) @@ -45,7 +45,7 @@ func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]inte } // Delete deletes a secret from the KV v1 secrets engine. -func (kv *KVv1) Delete(ctx context.Context, secretPath string) error { +func (kv *kvv1) Delete(ctx context.Context, secretPath string) error { pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) diff --git a/api/kv_v2.go b/api/kv_v2.go index 7a98cfeefd234..2b6149b16a45c 100644 --- a/api/kv_v2.go +++ b/api/kv_v2.go @@ -2,9 +2,7 @@ package api import ( "context" - "errors" "fmt" - "net/http" "sort" "strconv" "time" @@ -12,7 +10,7 @@ import ( "github.com/mitchellh/mapstructure" ) -type KVv2 struct { +type kvv2 struct { c *Client mountPath string } @@ -32,35 +30,6 @@ type KVMetadata struct { Raw *Secret } -// KVMetadataPutInput is the subset of metadata that can be replaced for a -// KV v2 secret using the PutMetadata method. -// -// All fields should be explicitly provided, as any fields left unset in the -// struct will be reset to their zero value. -type KVMetadataPutInput struct { - CASRequired bool - CustomMetadata map[string]interface{} - DeleteVersionAfter time.Duration - MaxVersions int -} - -// KVMetadataPatchInput is the subset of metadata that can be manually modified for -// a KV v2 secret using the PatchMetadata method. -// -// The struct's fields are all pointers. A pointer to a field's zero -// value (e.g. false for *bool) implies that field should be reset to its -// zero value after update, whereas a field left as a nil pointer -// (e.g. nil for *bool) implies the field should remain unchanged. 
-// -// Since maps are already pointers, use an empty map to remove all -// custom metadata. -type KVMetadataPatchInput struct { - CASRequired *bool - CustomMetadata map[string]interface{} - DeleteVersionAfter *time.Duration - MaxVersions *int -} - // KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret. type KVVersionMetadata struct { Version int `mapstructure:"version"` @@ -69,39 +38,18 @@ type KVVersionMetadata struct { Destroyed bool `mapstructure:"destroyed"` } -// Currently supported options: WithOption, WithCheckAndSet, WithMethod +// Currently supported options: WithCheckAndSet type KVOption func() (key string, value interface{}) -const ( - KVOptionCheckAndSet = "cas" - KVOptionMethod = "method" - KVMergeMethodPatch = "patch" - KVMergeMethodReadWrite = "rw" -) - -// WithOption can optionally be passed to provide generic options for a -// KV request. Valid keys and values depend on the type of request. -func WithOption(key string, value interface{}) KVOption { - return func() (string, interface{}) { - return key, value - } -} - // WithCheckAndSet can optionally be passed to perform a check-and-set -// operation on a KV request. If not set, the write will be allowed. -// If cas is set to 0, a write will only be allowed if the key doesn't exist. -// If set to non-zero, the write will only be allowed if the key’s current -// version matches the version specified in the cas parameter. +// operation. If not set, the write will be allowed. If cas is set to 0, a +// write will only be allowed if the key doesn't exist. If set to non-zero, +// the write will only be allowed if the key’s current version matches the +// version specified in the cas parameter. func WithCheckAndSet(cas int) KVOption { - return WithOption(KVOptionCheckAndSet, cas) -} - -// WithMergeMethod can optionally be passed to dictate which type of -// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH -// request will be issued. 
If set to "rw", then a read will be performed, -// then a local update, followed by a remote update. Defaults to "patch". -func WithMergeMethod(method string) KVOption { - return WithOption(KVOptionMethod, method) + return func() (string, interface{}) { + return "cas", cas + } } // Get returns the latest version of a secret from the KV v2 secrets engine. @@ -109,7 +57,7 @@ func WithMergeMethod(method string) KVOption { // If the latest version has been deleted, an error will not be thrown, but // the Data field on the returned secret will be nil, and the Metadata field // will contain the deletion time. -func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { +func (kv *kvv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) @@ -117,7 +65,7 @@ func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) } if secret == nil { - return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + return nil, fmt.Errorf("no secret found at %s", pathToRead) } kvSecret, err := extractDataAndVersionMetadata(secret) @@ -142,7 +90,7 @@ func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { // // GetVersionsAsList can provide a list of available versions sorted by // version number, while the response from GetMetadata contains them as a map. 
-func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) { +func (kv *kvv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) { pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) queryParams := map[string][]string{"version": {strconv.Itoa(version)}} @@ -151,7 +99,7 @@ func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) return nil, err } if secret == nil { - return nil, fmt.Errorf("%w: for version %d at %s", ErrSecretNotFound, version, pathToRead) + return nil, fmt.Errorf("no secret with version %d found at %s", version, pathToRead) } kvSecret, err := extractDataAndVersionMetadata(secret) @@ -169,7 +117,7 @@ func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) } // GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number. -func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) { +func (kv *kvv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) { pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) @@ -177,7 +125,7 @@ func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVV return nil, err } if secret == nil || secret.Data == nil { - return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) } md, err := extractFullMetadata(secret) @@ -196,7 +144,7 @@ func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVV // GetMetadata returns the full metadata for a given secret, including a map of // its existing versions and their respective creation/deletion times, etc. 
-func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) { +func (kv *kvv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) { pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) @@ -204,7 +152,7 @@ func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata return nil, err } if secret == nil || secret.Data == nil { - return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) } md, err := extractFullMetadata(secret) @@ -221,7 +169,7 @@ func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata // If the secret already exists, a new version will be created // and the previous version can be accessed with the GetVersion method. // GetMetadata can provide a list of available versions. -func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) { +func (kv *kvv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) { pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) wrappedData := map[string]interface{}{ @@ -246,7 +194,7 @@ func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]inte return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) } if secret == nil { - return nil, fmt.Errorf("%w: after writing to %s", ErrSecretNotFound, pathToWriteTo) + return nil, fmt.Errorf("no secret was written to %s", pathToWriteTo) } metadata, err := extractVersionMetadata(secret) @@ -269,107 +217,9 @@ func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]inte return kvSecret, nil } -// PutMetadata can be used to fully replace a subset of metadata fields for a -// given KV v2 secret. 
All fields will replace the corresponding values on the Vault server. -// Any fields left as nil will reset the field on the Vault server back to its zero value. -// -// To only partially replace the values of these metadata fields, use PatchMetadata. -// -// This method can also be used to create a new secret with just metadata and no secret data yet. -func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error { - pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) - - const ( - casRequiredKey = "cas_required" - deleteVersionAfterKey = "delete_version_after" - maxVersionsKey = "max_versions" - customMetadataKey = "custom_metadata" - ) - - // convert values to a map we can pass to Logical - metadataMap := make(map[string]interface{}) - metadataMap[maxVersionsKey] = metadata.MaxVersions - metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String() - metadataMap[casRequiredKey] = metadata.CASRequired - metadataMap[customMetadataKey] = metadata.CustomMetadata - - _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap) - if err != nil { - return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err) - } - - return nil -} - -// Patch additively updates the most recent version of a key-value secret, -// differentiating it from Put which will fully overwrite the previous data. -// Only the key-value pairs that are new or changing need to be provided. -// -// The WithMethod KVOption function can optionally be passed to dictate which -// kind of patch to perform, as older Vault server versions (pre-1.9.0) may -// only be able to use the old "rw" (read-then-write) style of partial update, -// whereas newer Vault servers can use the default value of "patch" if the -// client token's policy has the "patch" capability. 
-func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { - // determine patch method - var patchMethod string - var ok bool - for _, opt := range opts { - k, v := opt() - if k == "method" { - patchMethod, ok = v.(string) - if !ok { - return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"") - } - } - } - - // Determine which kind of patch to use, - // the newer HTTP Patch style or the older read-then-write style - var kvs *KVSecret - var err error - switch patchMethod { - case "rw": - kvs, err = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData) - case "patch": - kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) - case "": - kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) - default: - return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"") - } - if err != nil { - return nil, fmt.Errorf("unable to perform patch: %w", err) - } - if kvs == nil { - return nil, fmt.Errorf("no secret was written to %s", secretPath) - } - - return kvs, nil -} - -// PatchMetadata can be used to replace just a subset of a secret's -// metadata fields at a time, as opposed to PutMetadata which is used to -// completely replace all fields on the previous metadata. 
-func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error { - pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) - - md, err := toMetadataMap(metadata) - if err != nil { - return fmt.Errorf("unable to create map for JSON merge patch request: %w", err) - } - - _, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md) - if err != nil { - return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err) - } - - return nil -} - // Delete deletes the most recent version of a secret from the KV v2 // secrets engine. To delete an older version, use DeleteVersions. -func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { +func (kv *kvv2) Delete(ctx context.Context, secretPath string) error { pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) @@ -382,7 +232,7 @@ func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { // DeleteVersions deletes the specified versions of a secret from the KV v2 // secrets engine. To delete the latest version of a secret, just use Delete. -func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { +func (kv *kvv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { // verb and path are different when trying to delete past versions pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath) @@ -405,119 +255,27 @@ func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions return nil } -// DeleteMetadata deletes all versions and metadata of the secret at the -// given path. 
-func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error { - pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) - - _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) - if err != nil { - return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err) - } - - return nil -} - -// Undelete undeletes the given versions of a secret, restoring the data -// so that it can be fetched again with Get requests. -// -// A list of existing versions can be retrieved using the GetVersionsAsList method. -func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error { - pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath) - - data := map[string]interface{}{ - "versions": versions, - } - - _, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data) - if err != nil { - return fmt.Errorf("error undeleting secret metadata at %s: %w", pathToUndelete, err) - } - - return nil -} - -// Destroy permanently removes the specified secret versions' data -// from the Vault server. If no secret exists at the given path, no -// action will be taken. -// -// A list of existing versions can be retrieved using the GetVersionsAsList method. -func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error { - pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath) - - data := map[string]interface{}{ - "versions": versions, - } - - _, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data) - if err != nil { - return fmt.Errorf("error destroying secret metadata at %s: %w", pathToDestroy, err) - } - - return nil -} - -// Rollback can be used to roll a secret back to a previous -// non-deleted/non-destroyed version. That previous version becomes the -// next/newest version for the path. 
-func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) { - // First, do a read to get the current version for check-and-set - latest, err := kv.Get(ctx, secretPath) - if err != nil { - return nil, fmt.Errorf("unable to get latest version of secret: %w", err) - } - - // Make sure a value already exists - if latest == nil { - return nil, fmt.Errorf("no secret was found: %w", err) - } - - // Verify metadata found - if latest.VersionMetadata == nil { - return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data") - } - - // Now run it again and read the version we want to roll back to - rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion) - if err != nil { - return nil, fmt.Errorf("unable to get previous version %d of secret: %w", toVersion, err) - } - - err = validateRollbackVersion(rollbackVersion) - if err != nil { - return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err) - } - - casVersion := latest.VersionMetadata.Version - kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion)) - if err != nil { - return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err) - } - - return kvs, nil -} - func extractCustomMetadata(secret *Secret) (map[string]interface{}, error) { // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key - customMetadataInterface, ok := secret.Data["custom_metadata"] + cmI, ok := secret.Data["custom_metadata"] if !ok { - metadataInterface, ok := secret.Data["metadata"] + mI, ok := secret.Data["metadata"] if !ok { // if that's not found, bail since it should have had one or the other return nil, fmt.Errorf("secret is missing expected fields") } - metadataMap, ok := metadataInterface.(map[string]interface{}) + mM, ok := mI.(map[string]interface{}) if !ok { - return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, 
metadataInterface) + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", mI, mI) } - customMetadataInterface, ok = metadataMap["custom_metadata"] + cmI, ok = mM["custom_metadata"] if !ok { - return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\": %v", metadataMap) + return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\":%v", mM) } } - cm, ok := customMetadataInterface.(map[string]interface{}) - if !ok && customMetadataInterface != nil { - return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", customMetadataInterface, customMetadataInterface) + cm, ok := cmI.(map[string]interface{}) + if !ok && cmI != nil { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", cmI, cmI) } return cm, nil @@ -641,160 +399,3 @@ func extractFullMetadata(secret *Secret) (*KVMetadata, error) { return metadata, nil } - -func validateRollbackVersion(rollbackVersion *KVSecret) error { - // Make sure a value already exists - if rollbackVersion == nil || rollbackVersion.Data == nil { - return fmt.Errorf("no secret found") - } - - // Verify metadata found - if rollbackVersion.VersionMetadata == nil { - return fmt.Errorf("no version metadata found; rollback only works on existing data") - } - - // Verify it hasn't been deleted - if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() { - return fmt.Errorf("cannot roll back to a version that has been deleted") - } - - if rollbackVersion.VersionMetadata.Destroyed { - return fmt.Errorf("cannot roll back to a version that has been destroyed") - } - - // Verify old data found - if rollbackVersion.Data == nil { - return fmt.Errorf("no data found; rollback only works on existing data") - } - - return nil -} - -func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { - pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath) - - // 
take any other additional options provided - // and pass them along to the patch request - wrappedData := map[string]interface{}{ - "data": newData, - } - options := make(map[string]interface{}) - for _, opt := range opts { - k, v := opt() - options[k] = v - } - if len(opts) > 0 { - wrappedData["options"] = options - } - - secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) - if err != nil { - var re *ResponseError - - if errors.As(err, &re) { - switch re.StatusCode { - // 403 - case http.StatusForbidden: - return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err) - - // 404 - case http.StatusNotFound: - return nil, fmt.Errorf("%w: performing merge patch to %s", ErrSecretNotFound, pathToMergePatch) - - // 405 - case http.StatusMethodNotAllowed: - // If it's a 405, that probably means the server is running a pre-1.9 - // Vault version that doesn't support the HTTP PATCH method. - // Fall back to the old way of doing it. - return readThenWrite(ctx, client, mountPath, secretPath, newData) - } - } - - return nil, fmt.Errorf("error performing merge patch to %s: %w", pathToMergePatch, err) - } - - metadata, err := extractVersionMetadata(secret) - if err != nil { - return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) - } - - kvSecret := &KVSecret{ - Data: nil, // secret.Data in this case is the metadata - VersionMetadata: metadata, - Raw: secret, - } - - cm, err := extractCustomMetadata(secret) - if err != nil { - return nil, fmt.Errorf("error reading custom metadata for secret %s: %w", secretPath, err) - } - kvSecret.CustomMetadata = cm - - return kvSecret, nil -} - -func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) { - // First, read the secret. 
- existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath) - if err != nil { - return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err) - } - - // Make sure the secret already exists - if existingVersion == nil || existingVersion.Data == nil { - return nil, fmt.Errorf("%w: at %s as part of read-then-write patch operation", ErrSecretNotFound, secretPath) - } - - // Verify existing secret has metadata - if existingVersion.VersionMetadata == nil { - return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath) - } - - // Copy new data over with existing data - combinedData := existingVersion.Data - for k, v := range newData { - combinedData[k] = v - } - - updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version)) - if err != nil { - return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err) - } - - return updatedSecret, nil -} - -func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) { - metadataMap := make(map[string]interface{}) - - const ( - casRequiredKey = "cas_required" - deleteVersionAfterKey = "delete_version_after" - maxVersionsKey = "max_versions" - customMetadataKey = "custom_metadata" - ) - - // The KVMetadataPatchInput struct is designed to have pointer fields so that - // the user can easily express the difference between explicitly setting a - // field back to its zero value (e.g. false), as opposed to just having - // the field remain unchanged (e.g. nil). This way, they only need to pass - // the fields they want to change. 
- if patchInput.MaxVersions != nil { - metadataMap[maxVersionsKey] = *(patchInput.MaxVersions) - } - if patchInput.CASRequired != nil { - metadataMap[casRequiredKey] = *(patchInput.CASRequired) - } - if patchInput.CustomMetadata != nil { - if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys - metadataMap[customMetadataKey] = nil - } else { - metadataMap[customMetadataKey] = patchInput.CustomMetadata - } - } - if patchInput.DeleteVersionAfter != nil { - metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String() - } - - return metadataMap, nil -} diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go index 5f3eadbffdd88..f06263526f35a 100644 --- a/api/lifetime_watcher.go +++ b/api/lifetime_watcher.go @@ -50,24 +50,25 @@ const ( // LifetimeWatcher is a process for watching lifetime of a secret. // -// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ -// Secret: mySecret, -// }) -// go watcher.Start() -// defer watcher.Stop() +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() // -// for { -// select { -// case err := <-watcher.DoneCh(): -// if err != nil { -// log.Fatal(err) -// } +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } // -// // Renewal is now over -// case renewal := <-watcher.RenewCh(): -// log.Printf("Successfully renewed: %#v", renewal) -// } -// } // // `DoneCh` will return if renewal fails, or if the remaining lease duration is // under a built-in threshold and either renewing is not extending it or @@ -250,8 +251,7 @@ func (r *LifetimeWatcher) doRenew() error { } func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, - renew 
renewFunc, initialRetryInterval time.Duration, -) error { + renew renewFunc, initialRetryInterval time.Duration) error { if credString == "" || (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { return r.errLifetimeWatcherNotRenewable diff --git a/api/output_string.go b/api/output_string.go index 80c591f20b5c7..b8c396ebc05db 100644 --- a/api/output_string.go +++ b/api/output_string.go @@ -60,19 +60,19 @@ func (d *OutputStringError) buildCurlString() (string, error) { finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) } if d.ClientCACert != "" { - clientCACert := strings.ReplaceAll(d.ClientCACert, "'", "'\"'\"'") + clientCACert := strings.Replace(d.ClientCACert, "'", "'\"'\"'", -1) finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) } if d.ClientCAPath != "" { - clientCAPath := strings.ReplaceAll(d.ClientCAPath, "'", "'\"'\"'") + clientCAPath := strings.Replace(d.ClientCAPath, "'", "'\"'\"'", -1) finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) } if d.ClientCert != "" { - clientCert := strings.ReplaceAll(d.ClientCert, "'", "'\"'\"'") + clientCert := strings.Replace(d.ClientCert, "'", "'\"'\"'", -1) finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert) } if d.ClientKey != "" { - clientKey := strings.ReplaceAll(d.ClientKey, "'", "'\"'\"'") + clientKey := strings.Replace(d.ClientKey, "'", "'\"'\"'", -1) finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) } for k, v := range d.Request.Header { @@ -87,7 +87,7 @@ func (d *OutputStringError) buildCurlString() (string, error) { if len(body) > 0 { // We need to escape single quotes since that's what we're using to // quote the body - escapedBody := strings.ReplaceAll(string(body), "'", "'\"'\"'") + escapedBody := strings.Replace(string(body), "'", "'\"'\"'", -1) finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) } diff --git a/api/plugin_helpers.go 
b/api/plugin_helpers.go index 2b1b35c3b593e..e8ceb9c2fd6e5 100644 --- a/api/plugin_helpers.go +++ b/api/plugin_helpers.go @@ -16,11 +16,7 @@ import ( "github.com/hashicorp/errwrap" ) -const ( - // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override - // setting a TLSProviderFunc for a plugin. - PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" - +var ( // PluginMetadataModeEnv is an ENV name used to disable TLS communication // to bootstrap mounting plugins. PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" @@ -28,50 +24,50 @@ const ( // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the // plugin. PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" -) -// sudoPaths is a map containing the paths that require a token's policy -// to have the "sudo" capability. The keys are the paths as strings, in -// the same format as they are returned by the OpenAPI spec. The values -// are the regular expressions that can be used to test whether a given -// path matches that path or not (useful specifically for the paths that -// contain templated fields.) 
-var sudoPaths = map[string]*regexp.Regexp{ - "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), - "/pki/root": regexp.MustCompile(`^/pki/root$`), - "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), - "/sys/audit": regexp.MustCompile(`^/sys/audit$`), - "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), - "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), - "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), - "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), - "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), - "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), - "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), - "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), - "/sys/leases": regexp.MustCompile(`^/sys/leases$`), - "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), - "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), - "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), - "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), - "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), - "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), - "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), - "/sys/raw": regexp.MustCompile(`^/sys/raw$`), - "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), - "/sys/remount": regexp.MustCompile(`^/sys/remount$`), - "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), - "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), - "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), - - // 
enterprise-only paths - "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), - "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), - "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), - "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), - "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), - "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), -} + // sudoPaths is a map containing the paths that require a token's policy + // to have the "sudo" capability. The keys are the paths as strings, in + // the same format as they are returned by the OpenAPI spec. The values + // are the regular expressions that can be used to test whether a given + // path matches that path or not (useful specifically for the paths that + // contain templated fields.) 
+ sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/raw": regexp.MustCompile(`^/sys/raw$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + + // 
enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), + } +) // PluginAPIClientMeta is a helper that plugins can use to configure TLS connections // back to Vault. @@ -124,7 +120,7 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) // VaultPluginTLSProviderContext is run inside a plugin and retrieves the response // wrapped TLS certificate from vault. It returns a configured TLS Config. func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { - if os.Getenv(PluginAutoMTLSEnv) == "true" || os.Getenv(PluginMetadataModeEnv) == "true" { + if os.Getenv(PluginMetadataModeEnv) == "true" { return nil } diff --git a/api/ssh_agent.go b/api/ssh_agent.go index 03fe2bea53ed2..505519b04e7c2 100644 --- a/api/ssh_agent.go +++ b/api/ssh_agent.go @@ -85,10 +85,11 @@ func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509. 
} // Returns true if any of the following conditions are true: -// - CA cert is configured -// - CA path is configured -// - configured to skip certificate verification -// - TLS server name is configured +// * CA cert is configured +// * CA path is configured +// * configured to skip certificate verification +// * TLS server name is configured +// func (c *SSHHelperConfig) shouldSetTLSParameters() bool { return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify } diff --git a/api/sys_audit.go b/api/sys_audit.go index 82d9aab0b7a01..7020256f41009 100644 --- a/api/sys_audit.go +++ b/api/sys_audit.go @@ -87,8 +87,7 @@ func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, erro // DEPRECATED: Use EnableAuditWithOptions instead func (c *Sys) EnableAudit( - path string, auditType string, desc string, opts map[string]string, -) error { + path string, auditType string, desc string, opts map[string]string) error { return c.EnableAuditWithOptions(path, &EnableAuditOptions{ Type: auditType, Description: desc, diff --git a/api/sys_mounts.go b/api/sys_mounts.go index ddaddaf4752bb..52f51139f77b6 100644 --- a/api/sys_mounts.go +++ b/api/sys_mounts.go @@ -247,7 +247,6 @@ type MountInput struct { SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` Options map[string]string `json:"options"` - PluginVersion string `json:"plugin_version,omitempty"` // Deprecated: Newer server responses should be returning this information in the // Type field (json: "type") instead. 
@@ -282,10 +281,6 @@ type MountOutput struct { Local bool `json:"local"` SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` - PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"` - RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"` - RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"` - DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"` } type MountConfigOutput struct { diff --git a/api/sys_mounts_test.go b/api/sys_mounts_test.go deleted file mode 100644 index d461a9d495cf8..0000000000000 --- a/api/sys_mounts_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package api - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestListMounts(t *testing.T) { - mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultMountsHandler)) - defer mockVaultServer.Close() - - cfg := DefaultConfig() - cfg.Address = mockVaultServer.URL - client, err := NewClient(cfg) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - expectedMounts := map[string]struct { - Type string - Version string - }{ - "cubbyhole/": {Type: "cubbyhole", Version: "v1.0.0"}, - "identity/": {Type: "identity", Version: ""}, - "secret/": {Type: "kv", Version: ""}, - "sys/": {Type: "system", Version: ""}, - } - - for path, mount := range resp { - expected, ok := expectedMounts[path] - if !ok { - t.Errorf("Unexpected mount: %s: %+v", path, mount) - continue - } - if expected.Type != mount.Type || expected.Version != mount.PluginVersion { - t.Errorf("Mount did not match: %s -> expected %+v but got %+v", path, expected, mount) - } - } - - for path, expected := range expectedMounts { - mount, ok := resp[path] - if !ok { - t.Errorf("Expected mount not found mount: %s: %+v", path, expected) - continue - } - if 
expected.Type != mount.Type || expected.Version != mount.PluginVersion { - t.Errorf("Mount did not match: %s -> expected %+v but got %+v", path, expected, mount) - } - } -} - -func mockVaultMountsHandler(w http.ResponseWriter, _ *http.Request) { - _, _ = w.Write([]byte(listMountsResponse)) -} - -const listMountsResponse = `{ - "request_id": "3cd881e9-ea50-2e06-90b2-5641667485fa", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "cubbyhole/": { - "accessor": "cubbyhole_2e3fc28d", - "config": { - "default_lease_ttl": 0, - "force_no_cache": false, - "max_lease_ttl": 0 - }, - "description": "per-token private secret storage", - "external_entropy_access": false, - "local": true, - "options": null, - "plugin_version": "v1.0.0", - "running_sha256": "", - "running_plugin_version": "", - "seal_wrap": false, - "type": "cubbyhole", - "uuid": "575063dc-5ef8-4487-c842-22c494c19a6f" - }, - "identity/": { - "accessor": "identity_6e01c327", - "config": { - "default_lease_ttl": 0, - "force_no_cache": false, - "max_lease_ttl": 0, - "passthrough_request_headers": [ - "Authorization" - ] - }, - "description": "identity store", - "external_entropy_access": false, - "local": false, - "options": null, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", - "seal_wrap": false, - "type": "identity", - "uuid": "187d7eba-3471-554b-c2d9-1479612c8046" - }, - "secret/": { - "accessor": "kv_3e2f282f", - "config": { - "default_lease_ttl": 0, - "force_no_cache": false, - "max_lease_ttl": 0 - }, - "description": "key/value secret storage", - "external_entropy_access": false, - "local": false, - "options": { - "version": "2" - }, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", - "seal_wrap": false, - "type": "kv", - "uuid": "13375e0f-876e-7e96-0a3e-076f37b6b69d" - }, - "sys/": { - "accessor": "system_93503264", - "config": { - "default_lease_ttl": 0, - "force_no_cache": false, - "max_lease_ttl": 0, - 
"passthrough_request_headers": [ - "Accept" - ] - }, - "description": "system endpoints used for control, policy and debugging", - "external_entropy_access": false, - "local": false, - "options": null, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", - "seal_wrap": true, - "type": "system", - "uuid": "1373242d-cc4d-c023-410b-7f336e7ba0a8" - } - } -}` diff --git a/api/sys_plugins.go b/api/sys_plugins.go index 389e66eb1fb0e..004ee222bfdf8 100644 --- a/api/sys_plugins.go +++ b/api/sys_plugins.go @@ -22,8 +22,6 @@ type ListPluginsResponse struct { // PluginsByType is the list of plugins by type. PluginsByType map[consts.PluginType][]string `json:"types"` - Details []PluginDetails `json:"details,omitempty"` - // Names is the list of names of the plugins. // // Deprecated: Newer server responses should be returning PluginsByType (json: @@ -31,14 +29,6 @@ type ListPluginsResponse struct { Names []string `json:"names"` } -type PluginDetails struct { - Type string `json:"string"` - Name string `json:"name"` - Version string `json:"version,omitempty"` - Builtin bool `json:"builtin"` - DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` -} - // ListPlugins wraps ListPluginsWithContext using context.Background. 
func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { return c.ListPluginsWithContext(context.Background(), i) @@ -108,7 +98,6 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( result := &ListPluginsResponse{ PluginsByType: make(map[consts.PluginType][]string), - Details: []PluginDetails{}, } if i.Type == consts.PluginTypeUnknown { for _, pluginType := range consts.PluginTypes { @@ -140,12 +129,6 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( result.PluginsByType[i.Type] = respKeys } - if detailed, ok := secret.Data["detailed"]; ok { - if err := mapstructure.Decode(detailed, &result.Details); err != nil { - return nil, err - } - } - return result, nil } @@ -159,12 +142,11 @@ type GetPluginInput struct { // GetPluginResponse is the response from the GetPlugin call. type GetPluginResponse struct { - Args []string `json:"args"` - Builtin bool `json:"builtin"` - Command string `json:"command"` - Name string `json:"name"` - SHA256 string `json:"sha256"` - DeprecationStatus string `json:"deprecation_status,omitempty"` + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` } // GetPlugin wraps GetPluginWithContext using context.Background. @@ -212,9 +194,6 @@ type RegisterPluginInput struct { // SHA256 is the shasum of the plugin. SHA256 string `json:"sha256,omitempty"` - - // Version is the optional version of the plugin being registered - Version string `json:"version,omitempty"` } // RegisterPlugin wraps RegisterPluginWithContext using context.Background. @@ -248,9 +227,6 @@ type DeregisterPluginInput struct { // Type of the plugin. Required. Type consts.PluginType `json:"type"` - - // Version of the plugin. Optional. - Version string `json:"version,omitempty"` } // DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. 
@@ -266,7 +242,7 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug path := catalogPathByType(i.Type, i.Name) req := c.c.NewRequest(http.MethodDelete, path) - req.Params.Set("version", i.Version) + resp, err := c.c.rawRequestWithContext(ctx, req) if err == nil { defer resp.Body.Close() diff --git a/api/sys_plugins_test.go b/api/sys_plugins_test.go index d4a577bac9de8..24295b6f22b61 100644 --- a/api/sys_plugins_test.go +++ b/api/sys_plugins_test.go @@ -9,27 +9,8 @@ import ( "github.com/hashicorp/vault/sdk/helper/consts" ) -func TestRegisterPlugin(t *testing.T) { - mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerRegister)) - defer mockVaultServer.Close() - - cfg := DefaultConfig() - cfg.Address = mockVaultServer.URL - client, err := NewClient(cfg) - if err != nil { - t.Fatal(err) - } - - err = client.Sys().RegisterPluginWithContext(context.Background(), &RegisterPluginInput{ - Version: "v1.0.0", - }) - if err != nil { - t.Fatal(err) - } -} - func TestListPlugins(t *testing.T) { - mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerList)) + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandler)) defer mockVaultServer.Close() cfg := DefaultConfig() @@ -63,7 +44,7 @@ func TestListPlugins(t *testing.T) { } } -func mockVaultHandlerList(w http.ResponseWriter, _ *http.Request) { +func mockVaultHandler(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte(listUntypedResponse)) } @@ -96,9 +77,3 @@ const listUntypedResponse = `{ "warnings": null, "auth": null }` - -func mockVaultHandlerRegister(w http.ResponseWriter, _ *http.Request) { - _, _ = w.Write([]byte(registerResponse)) -} - -const registerResponse = `{}` diff --git a/api/sys_seal.go b/api/sys_seal.go index c772ae0fc2600..189d61469ac7a 100644 --- a/api/sys_seal.go +++ b/api/sys_seal.go @@ -93,22 +93,20 @@ func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*Sea } type SealStatusResponse struct 
{ - Type string `json:"type"` - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - T int `json:"t"` - N int `json:"n"` - Progress int `json:"progress"` - Nonce string `json:"nonce"` - Version string `json:"version"` - BuildDate string `json:"build_date"` - Migration bool `json:"migration"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - RecoverySeal bool `json:"recovery_seal"` - StorageType string `json:"storage_type,omitempty"` - HCPLinkStatus string `json:"hcp_link_status,omitempty"` - HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` } type UnsealOpts struct { diff --git a/audit/format_json_test.go b/audit/format_json_test.go index e4a703d12ad42..e2d8b3b086870 100644 --- a/audit/format_json_test.go +++ b/audit/format_json_test.go @@ -144,7 +144,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) { t.Fatalf( - "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", + "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'", name, buf.String(), string(expectedBytes)) } } diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go index 00921c0c71a95..6774f01fb6bd7 100644 --- a/audit/format_jsonx_test.go +++ b/audit/format_jsonx_test.go @@ -137,7 +137,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) { if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) { t.Fatalf( - "bad: 
%s\nResult:\n\n%q\n\nExpected:\n\n%q", + "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'", name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) } } diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index ebd8d3c06a801..9612b65f7afd8 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -18,9 +18,6 @@ const ( secretIDAccessorLocalPrefix = "accessor_local/" ) -// ReportedVersion is used to report a specific version to Vault. -var ReportedVersion = "" - type backend struct { *framework.Backend @@ -114,9 +111,8 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathTidySecretID(b), }, ), - Invalidate: b.invalidate, - BackendType: logical.TypeCredential, - RunningVersion: ReportedVersion, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } return b, nil } diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index 5822519b1c79f..ba478c4ffd50e 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -33,9 +33,6 @@ func pathLogin(b *backend) *framework.Path { logical.AliasLookaheadOperation: &framework.PathOperation{ Callback: b.pathLoginUpdateAliasLookahead, }, - logical.ResolveRoleOperation: &framework.PathOperation{ - Callback: b.pathLoginResolveRole, - }, }, HelpSynopsis: pathLoginHelpSys, HelpDescription: pathLoginHelpDesc, @@ -57,39 +54,6 @@ func (b *backend) pathLoginUpdateAliasLookahead(ctx context.Context, req *logica }, nil } -func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // RoleID must be supplied during every login - roleID := strings.TrimSpace(data.Get("role_id").(string)) - if roleID == "" { - return logical.ErrorResponse("missing role_id"), nil - } - - // Look for the storage entry that maps the roleID to role - roleIDIndex, err := b.roleIDEntry(ctx, 
req.Storage, roleID) - if err != nil { - return nil, err - } - if roleIDIndex == nil { - return logical.ErrorResponse("invalid role ID"), nil - } - - roleName := roleIDIndex.Name - - roleLock := b.roleLock(roleName) - roleLock.RLock() - - role, err := b.roleEntry(ctx, req.Storage, roleName) - roleLock.RUnlock() - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse("invalid role ID"), nil - } - - return logical.ResolveRoleResponse(roleName) -} - // Returns the Auth object indicating the authentication and authorization information // if the credentials provided are validated by the backend. func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go index 542bf665b6e9a..9d1facf52cf96 100644 --- a/builtin/credential/approle/path_login_test.go +++ b/builtin/credential/approle/path_login_test.go @@ -301,92 +301,3 @@ func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Reques return renewReq } - -func TestAppRole_RoleResolve(t *testing.T) { - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) - - role := "role1" - createRole(t, b, storage, role, "a,b,c") - roleRoleIDReq := &logical.Request{ - Operation: logical.ReadOperation, - Path: "role/role1/role-id", - Storage: storage, - } - resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - roleID := resp.Data["role_id"] - - roleSecretIDReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "role/role1/secret-id", - Storage: storage, - } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - secretID := 
resp.Data["secret_id"] - - loginData := map[string]interface{}{ - "role_id": roleID, - "secret_id": secretID, - } - loginReq := &logical.Request{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Storage: storage, - Data: loginData, - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - } - - resp, err = b.HandleRequest(context.Background(), loginReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - - if resp.Data["role"] != role { - t.Fatalf("Role was not as expected. Expected %s, received %s", role, resp.Data["role"]) - } -} - -func TestAppRole_RoleDoesNotExist(t *testing.T) { - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) - - roleID := "roleDoesNotExist" - - loginData := map[string]interface{}{ - "role_id": roleID, - "secret_id": "secret", - } - loginReq := &logical.Request{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Storage: storage, - Data: loginData, - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - } - - resp, err = b.HandleRequest(context.Background(), loginReq) - if resp == nil && !resp.IsError() { - t.Fatalf("Response was not an error: err:%v resp:%#v", err, resp) - } - - errString, ok := resp.Data["error"].(string) - if !ok { - t.Fatal("Error not part of response.") - } - - if !strings.Contains(errString, "invalid role ID") { - t.Fatalf("Error was not due to invalid role ID. Error: %s", errString) - } -} diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index b079a9ca6fa6f..5bf56e5fb475b 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -481,16 +481,6 @@ the role.`, Type: framework.TypeCommaStringSlice, Description: defTokenFields["token_bound_cidrs"].Description, }, - "num_uses": { - Type: framework.TypeInt, - Description: `Number of times this SecretID can be used, after which the SecretID expires. 
-Overrides secret_id_num_uses role option when supplied. May not be higher than role's secret_id_num_uses.`, - }, - "ttl": { - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which this SecretID expires. -Overrides secret_id_ttl role option when supplied. May not be longer than role's secret_id_ttl.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathRoleSecretIDUpdate, @@ -601,16 +591,6 @@ the role.`, Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.`, }, - "num_uses": { - Type: framework.TypeInt, - Description: `Number of times this SecretID can be used, after which the SecretID expires. -Overrides secret_id_num_uses role option when supplied. May not be higher than role's secret_id_num_uses.`, - }, - "ttl": { - Type: framework.TypeDurationSecond, - Description: `Duration in seconds after which this SecretID expires. -Overrides secret_id_ttl role option when supplied. May not be longer than role's secret_id_ttl.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathRoleCustomSecretIDUpdate, @@ -1517,7 +1497,7 @@ func (b *backend) pathRoleFieldRead(ctx context.Context, req *logical.Request, d "bound_cidr_list": role.BoundCIDRList, }, } - resp.AddWarning(`The "bound_cidr_list" field is deprecated and will be removed. Please use "secret_id_bound_cidrs" instead.`) + resp.AddWarning(`The "bound_cidr_list" parameter is deprecated and will be removed. 
Please use "secret_id_bound_cidrs" instead.`) return resp, nil default: // shouldn't occur IRL @@ -2375,38 +2355,9 @@ func (b *backend) handleRoleSecretIDCommon(ctx context.Context, req *logical.Req return nil, err } - var numUses int - // Check whether or not specified num_uses is defined, otherwise fallback to role's secret_id_num_uses - if numUsesRaw, ok := data.GetOk("num_uses"); ok { - numUses = numUsesRaw.(int) - if numUses < 0 { - return logical.ErrorResponse("num_uses cannot be negative"), nil - } - - // If the specified num_uses is higher than the role's secret_id_num_uses, throw an error rather than implicitly overriding - if (numUses == 0 && role.SecretIDNumUses > 0) || (role.SecretIDNumUses > 0 && numUses > role.SecretIDNumUses) { - return logical.ErrorResponse("num_uses cannot be higher than the role's secret_id_num_uses"), nil - } - } else { - numUses = role.SecretIDNumUses - } - - var ttl time.Duration - // Check whether or not specified ttl is defined, otherwise fallback to role's secret_id_ttl - if ttlRaw, ok := data.GetOk("ttl"); ok { - ttl = time.Second * time.Duration(ttlRaw.(int)) - - // If the specified ttl is longer than the role's secret_id_ttl, throw an error rather than implicitly overriding - if (ttl == 0 && role.SecretIDTTL > 0) || (role.SecretIDTTL > 0 && ttl > role.SecretIDTTL) { - return logical.ErrorResponse("ttl cannot be longer than the role's secret_id_ttl"), nil - } - } else { - ttl = role.SecretIDTTL - } - secretIDStorage := &secretIDStorageEntry{ - SecretIDNumUses: numUses, - SecretIDTTL: ttl, + SecretIDNumUses: role.SecretIDNumUses, + SecretIDTTL: role.SecretIDTTL, Metadata: make(map[string]string), CIDRList: secretIDCIDRs, TokenBoundCIDRs: secretIDTokenCIDRs, @@ -2425,7 +2376,6 @@ func (b *backend) handleRoleSecretIDCommon(ctx context.Context, req *logical.Req "secret_id": secretID, "secret_id_accessor": secretIDStorage.SecretIDAccessor, "secret_id_ttl": int64(b.deriveSecretIDTTL(secretIDStorage.SecretIDTTL).Seconds()), - 
"secret_id_num_uses": secretIDStorage.SecretIDNumUses, }, } @@ -2526,11 +2476,11 @@ to be generated against only this specific role, it can be done via 'role//secret-id' and 'role//custom-secret-id' endpoints. The properties of the SecretID created against the role and the properties of the token issued with the SecretID generated against the role, can be -configured using the fields of this endpoint.`, +configured using the parameters of this endpoint.`, }, "role-bind-secret-id": { "Impose secret_id to be presented during login using this role.", - `By setting this to 'true', during login the field 'secret_id' becomes a mandatory argument. + `By setting this to 'true', during login the parameter 'secret_id' becomes a mandatory argument. The value of 'secret_id' can be retrieved using 'role//secret-id' endpoint.`, }, "role-bound-cidr-list": { @@ -2562,17 +2512,16 @@ defined on the role, can access the role.`, }, "role-secret-id-num-uses": { "Use limit of the SecretID generated against the role.", - `If a SecretID is generated/assigned against a role using the -'role//secret-id' or 'role//custom-secret-id' endpoint, -then the number of times this SecretID can be used is defined by this option. -However, this option may be overriden by the request's 'num_uses' field.`, + `If the SecretIDs are generated/assigned against the role using the +'role//secret-id' or 'role//custom-secret-id' endpoints, +then the number of times that SecretID can access the role is defined by +this option.`, }, "role-secret-id-ttl": { - "Duration in seconds of the SecretID generated against the role.", - `If a SecretID is generated/assigned against a role using the -'role//secret-id' or 'role//custom-secret-id' endpoint, -then the lifetime of this SecretID is defined by this option. 
-However, this option may be overridden by the request's 'ttl' field.`, + `Duration in seconds, representing the lifetime of the SecretIDs +that are generated against the role using 'role//secret-id' or +'role//custom-secret-id' endpoints.`, + ``, }, "role-secret-id-lookup": { "Read the properties of an issued secret_id", @@ -2635,8 +2584,8 @@ this endpoint.`, `The SecretID generated using this endpoint will be scoped to access just this role and none else. The properties of this SecretID will be based on the options set on the role. It will expire after a period -defined by the 'ttl' field or 'secret_id_ttl' option on the role, -and/or the backend mount's maximum TTL value.`, +defined by the 'secret_id_ttl' option on the role and/or the backend +mount's maximum TTL value.`, }, "role-custom-secret-id": { "Assign a SecretID of choice against the role.", @@ -2644,8 +2593,8 @@ and/or the backend mount's maximum TTL value.`, to do so. This will assign a client supplied SecretID to be used to access the role. This SecretID will behave similarly to the SecretIDs generated by the backend. The properties of this SecretID will be based on the options -set on the role. It will expire after a period defined by the 'ttl' field -or 'secret_id_ttl' option on the role, and/or the backend mount's maximum TTL value.`, +set on the role. 
It will expire after a period defined by the 'secret_id_ttl' +option on the role and/or the backend mount's maximum TTL value.`, }, "role-period": { "Updates the value of 'period' on the role", diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index 8ab3bfc666358..4c64d3879d9a3 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -1098,7 +1098,7 @@ func TestAppRole_RoleList(t *testing.T) { } } -func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { +func TestAppRole_RoleSecretID(t *testing.T) { var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -1135,18 +1135,13 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret_id") } - if resp.Data["secret_id_ttl"].(int64) != int64(roleData["secret_id_ttl"].(int)) { - t.Fatalf("secret_id_ttl has not defaulted to the role's secret id ttl") - } - if resp.Data["secret_id_num_uses"].(int) != roleData["secret_id_num_uses"].(int) { - t.Fatalf("secret_id_num_uses has not defaulted to the role's secret id num_uses") - } roleSecretIDReq.Path = "role/role1/custom-secret-id" roleCustomSecretIDData := map[string]interface{}{ "secret_id": "abcd123", } roleSecretIDReq.Data = roleCustomSecretIDData + roleSecretIDReq.Operation = logical.UpdateOperation resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) @@ -1155,240 +1150,6 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { if resp.Data["secret_id"] != "abcd123" { t.Fatalf("failed to set specific secret_id to role") } - if resp.Data["secret_id_ttl"].(int64) != int64(roleData["secret_id_ttl"].(int)) { - t.Fatalf("secret_id_ttl has not defaulted to the role's secret id ttl") - } - if resp.Data["secret_id_num_uses"].(int) != 
roleData["secret_id_num_uses"].(int) { - t.Fatalf("secret_id_num_uses has not defaulted to the role's secret id num_uses") - } -} - -func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { - type testCase struct { - name string - payload map[string]interface{} - } - - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) - - roleData := map[string]interface{}{ - "policies": "p,q,r,s", - "secret_id_num_uses": 0, - "secret_id_ttl": 0, - "token_ttl": 400, - "token_max_ttl": 500, - } - roleReq := &logical.Request{ - Operation: logical.CreateOperation, - Path: "role/role1", - Storage: storage, - Data: roleData, - } - - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - - testCases := []testCase{ - { - name: "finite num_uses ttl", - payload: map[string]interface{}{"secret_id": "finite", "ttl": 5, "num_uses": 5}, - }, - { - name: "infinite num_uses and ttl", - payload: map[string]interface{}{"secret_id": "infinite", "ttl": 0, "num_uses": 0}, - }, - { - name: "finite num_uses and infinite ttl", - payload: map[string]interface{}{"secret_id": "mixed1", "ttl": 0, "num_uses": 5}, - }, - { - name: "infinite num_uses and finite ttl", - payload: map[string]interface{}{"secret_id": "mixed2", "ttl": 5, "num_uses": 0}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - roleSecretIDReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "role/role1/secret-id", - Storage: storage, - } - roleCustomSecretIDData := tc.payload - roleSecretIDReq.Data = roleCustomSecretIDData - - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - - if resp.Data["secret_id"].(string) == "" { - t.Fatalf("failed to generate secret_id") - } - if resp.Data["secret_id_ttl"].(int64) != 
int64(tc.payload["ttl"].(int)) { - t.Fatalf("secret_id_ttl has not been set by the 'ttl' field") - } - if resp.Data["secret_id_num_uses"].(int) != tc.payload["num_uses"].(int) { - t.Fatalf("secret_id_num_uses has not been set by the 'num_uses' field") - } - - roleSecretIDReq.Path = "role/role1/custom-secret-id" - roleSecretIDReq.Data = roleCustomSecretIDData - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - - if resp.Data["secret_id"] != tc.payload["secret_id"] { - t.Fatalf("failed to set specific secret_id to role") - } - if resp.Data["secret_id_ttl"].(int64) != int64(tc.payload["ttl"].(int)) { - t.Fatalf("secret_id_ttl has not been set by the 'ttl' field") - } - if resp.Data["secret_id_num_uses"].(int) != tc.payload["num_uses"].(int) { - t.Fatalf("secret_id_num_uses has not been set by the 'num_uses' field") - } - }) - } -} - -func TestAppRole_ErrorsRoleSecretIDWithInvalidFields(t *testing.T) { - type testCase struct { - name string - payload map[string]interface{} - expected string - } - - type roleTestCase struct { - name string - options map[string]interface{} - cases []testCase - } - - infiniteTestCases := []testCase{ - { - name: "infinite ttl", - payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": 1, "ttl": 0}, - expected: "ttl cannot be longer than the role's secret_id_ttl", - }, - { - name: "infinite num_uses", - payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": 0, "ttl": 1}, - expected: "num_uses cannot be higher than the role's secret_id_num_uses", - }, - } - - negativeTestCases := []testCase{ - { - name: "negative num_uses", - payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": -1, "ttl": 0}, - expected: "num_uses cannot be negative", - }, - } - - roleTestCases := []roleTestCase{ - { - name: "infinite role secret id ttl", - options: map[string]interface{}{ - "secret_id_num_uses": 1, - 
"secret_id_ttl": 0, - }, - cases: []testCase{ - { - name: "higher num_uses", - payload: map[string]interface{}{"secret_id": "abcd123", "ttl": 0, "num_uses": 2}, - expected: "num_uses cannot be higher than the role's secret_id_num_uses", - }, - }, - }, - { - name: "infinite role num_uses", - options: map[string]interface{}{ - "secret_id_num_uses": 0, - "secret_id_ttl": 1, - }, - cases: []testCase{ - { - name: "longer ttl", - payload: map[string]interface{}{"secret_id": "abcd123", "ttl": 2, "num_uses": 0}, - expected: "ttl cannot be longer than the role's secret_id_ttl", - }, - }, - }, - { - name: "finite role ttl and num_uses", - options: map[string]interface{}{ - "secret_id_num_uses": 2, - "secret_id_ttl": 2, - }, - cases: infiniteTestCases, - }, - { - name: "mixed role ttl and num_uses", - options: map[string]interface{}{ - "secret_id_num_uses": 400, - "secret_id_ttl": 500, - }, - cases: negativeTestCases, - }, - } - - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) - - for i, rc := range roleTestCases { - roleData := map[string]interface{}{ - "policies": "p,q,r,s", - "token_ttl": 400, - "token_max_ttl": 500, - } - roleData["secret_id_num_uses"] = rc.options["secret_id_num_uses"] - roleData["secret_id_ttl"] = rc.options["secret_id_ttl"] - - roleReq := &logical.Request{ - Operation: logical.CreateOperation, - Path: fmt.Sprintf("role/role%d", i), - Storage: storage, - Data: roleData, - } - - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - - for _, tc := range rc.cases { - t.Run(fmt.Sprintf("%s/%s", rc.name, tc.name), func(t *testing.T) { - roleSecretIDReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("role/role%d/secret-id", i), - Storage: storage, - } - roleSecretIDReq.Data = tc.payload - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil 
&& !resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - if resp.Data["error"].(string) != tc.expected { - t.Fatalf("expected: %q, got: %q", tc.expected, resp.Data["error"].(string)) - } - - roleSecretIDReq.Path = fmt.Sprintf("role/role%d/custom-secret-id", i) - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && !resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - if resp.Data["error"].(string) != tc.expected { - t.Fatalf("expected: %q, got: %q", tc.expected, resp.Data["error"].(string)) - } - }) - } - } } func TestAppRole_RoleCRUD(t *testing.T) { diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index 5b435d3e3a5ce..14b79735c1b4d 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -1021,40 +1021,38 @@ func TestBackend_PathBlacklistRoleTag(t *testing.T) { } } -/* -This is an acceptance test. - - Requires the following env vars: - TEST_AWS_EC2_RSA2048 - TEST_AWS_EC2_PKCS7 - TEST_AWS_EC2_IDENTITY_DOCUMENT - TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG - TEST_AWS_EC2_AMI_ID - TEST_AWS_EC2_ACCOUNT_ID - TEST_AWS_EC2_IAM_ROLE_ARN - - If this is being run on an EC2 instance, you can set the environment vars using this bash snippet: - - export TEST_AWS_EC2_RSA2048=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/rsa2048) - export TEST_AWS_EC2_PKCS7=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7) - export TEST_AWS_EC2_IDENTITY_DOCUMENT=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | base64 -w 0) - export TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature | tr -d '\n') - export TEST_AWS_EC2_AMI_ID=$(curl -s http://169.254.169.254/latest/meta-data/ami-id) - export TEST_AWS_EC2_IAM_ROLE_ARN=$(aws iam get-role --role-name $(curl -q 
http://169.254.169.254/latest/meta-data/iam/security-credentials/ -S -s) --query Role.Arn --output text) - export TEST_AWS_EC2_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) - - If the test is not being run on an EC2 instance that has access to - credentials using EC2RoleProvider, on top of the above vars, following - needs to be set: - TEST_AWS_SECRET_KEY - TEST_AWS_ACCESS_KEY +/* This is an acceptance test. + Requires the following env vars: + TEST_AWS_EC2_RSA2048 + TEST_AWS_EC2_PKCS7 + TEST_AWS_EC2_IDENTITY_DOCUMENT + TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG + TEST_AWS_EC2_AMI_ID + TEST_AWS_EC2_ACCOUNT_ID + TEST_AWS_EC2_IAM_ROLE_ARN + + If this is being run on an EC2 instance, you can set the environment vars using this bash snippet: + + export TEST_AWS_EC2_RSA2048=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/rsa2048) + export TEST_AWS_EC2_PKCS7=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7) + export TEST_AWS_EC2_IDENTITY_DOCUMENT=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | base64 -w 0) + export TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature | tr -d '\n') + export TEST_AWS_EC2_AMI_ID=$(curl -s http://169.254.169.254/latest/meta-data/ami-id) + export TEST_AWS_EC2_IAM_ROLE_ARN=$(aws iam get-role --role-name $(curl -q http://169.254.169.254/latest/meta-data/iam/security-credentials/ -S -s) --query Role.Arn --output text) + export TEST_AWS_EC2_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) + + If the test is not being run on an EC2 instance that has access to + credentials using EC2RoleProvider, on top of the above vars, following + needs to be set: + TEST_AWS_SECRET_KEY + TEST_AWS_ACCESS_KEY */ func TestBackendAcc_LoginWithInstanceIdentityDocAndAccessListIdentity(t *testing.T) { for _, path := range []string{"identity-whitelist/", "identity-accesslist/"} { // This test case should 
be run only when certain env vars are set and // executed as an acceptance test. if os.Getenv(logicaltest.TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)) return } @@ -1517,7 +1515,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // This test case should be run only when certain env vars are set and // executed as an acceptance test. if os.Getenv(logicaltest.TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)) return } diff --git a/builtin/credential/aws/certificates.go b/builtin/credential/aws/certificates.go index c745ad2b3f243..066cdb3410067 100644 --- a/builtin/credential/aws/certificates.go +++ b/builtin/credential/aws/certificates.go @@ -26,9 +26,7 @@ func init() { // These certificates are for verifying PKCS#7 DSA signatures. 
// Copied from: -// -// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-pkcs7.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----' -// +// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-pkcs7.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----' // Last updated: 2022-05-31 const pkcs7RawCerts = `-----BEGIN CERTIFICATE----- MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index fb8ab4f47492f..fe70de0d06e87 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -128,9 +128,6 @@ needs to be supplied along with 'identity' parameter.`, logical.AliasLookaheadOperation: &framework.PathOperation{ Callback: b.pathLoginUpdate, }, - logical.ResolveRoleOperation: &framework.PathOperation{ - Callback: b.pathLoginResolveRole, - }, }, HelpSynopsis: pathLoginSyn, @@ -138,203 +135,6 @@ needs to be supplied along with 'identity' parameter.`, } } -func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - anyEc2, allEc2 := hasValuesForEc2Auth(data) - anyIam, allIam := hasValuesForIamAuth(data) - switch { - case anyEc2 && anyIam: - return logical.ErrorResponse("supplied auth values for both ec2 and iam auth types"), nil - case anyEc2 && !allEc2: - return logical.ErrorResponse("supplied some of the auth values for the ec2 auth type but not all"), nil - case anyEc2: - return b.pathLoginResolveRoleEc2(ctx, req, data) - case anyIam && !allIam: - return logical.ErrorResponse("supplied some of the auth values for the iam auth type but not all"), nil - case anyIam: - return b.pathLoginResolveRoleIam(ctx, req, data) - default: - return logical.ErrorResponse("didn't supply required authentication values"), nil - } -} - -func (b *backend) 
pathLoginEc2GetRoleNameAndIdentityDoc(ctx context.Context, req *logical.Request, data *framework.FieldData) (string, *identityDocument, *logical.Response, error) { - identityDocB64 := data.Get("identity").(string) - var identityDocBytes []byte - var err error - if identityDocB64 != "" { - identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64) - if err != nil || len(identityDocBytes) == 0 { - return "", nil, logical.ErrorResponse("failed to base64 decode the instance identity document"), nil - } - } - - signatureB64 := data.Get("signature").(string) - var signatureBytes []byte - if signatureB64 != "" { - signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64) - if err != nil { - return "", nil, logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil - } - } - - pkcs7B64 := data.Get("pkcs7").(string) - - // Either the pkcs7 signature of the instance identity document, or - // the identity document itself along with its SHA256 RSA signature - // needs to be provided. 
- if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) { - return "", nil, logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil - } else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) { - return "", nil, logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil - } - - // Verify the signature of the identity document and unmarshal it - var identityDocParsed *identityDocument - if pkcs7B64 != "" { - identityDocParsed, err = b.parseIdentityDocument(ctx, req.Storage, pkcs7B64) - if err != nil { - return "", nil, nil, err - } - if identityDocParsed == nil { - return "", nil, logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil - } - } else { - identityDocParsed, err = b.verifyInstanceIdentitySignature(ctx, req.Storage, identityDocBytes, signatureBytes) - if err != nil { - return "", nil, nil, err - } - if identityDocParsed == nil { - return "", nil, logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil - } - } - - roleName := data.Get("role").(string) - - // If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for - if roleName == "" { - roleName = identityDocParsed.AmiID - } - - // Get the entry for the role used by the instance - // Note that we don't return the roleEntry, but use it to determine if the role exists - // roleEntry does not contain the role name, so it is not appropriate to return - roleEntry, err := b.role(ctx, req.Storage, roleName) - if err != nil { - return "", nil, nil, err - } - if roleEntry == nil { - return "", nil, logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil - } - return roleName, identityDocParsed, nil, nil -} - -func (b *backend) 
pathLoginResolveRoleEc2(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, _, resp, err := b.pathLoginEc2GetRoleNameAndIdentityDoc(ctx, req, data) - if resp != nil || err != nil { - return resp, err - } - return logical.ResolveRoleResponse(role) -} - -func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, req *logical.Request, data *framework.FieldData) (string, *GetCallerIdentityResult, *iamEntity, *logical.Response, error) { - method := data.Get("iam_http_request_method").(string) - if method == "" { - return "", nil, nil, logical.ErrorResponse("missing iam_http_request_method"), nil - } - - // In the future, might consider supporting GET - if method != "POST" { - return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil - } - - rawUrlB64 := data.Get("iam_request_url").(string) - if rawUrlB64 == "" { - return "", nil, nil, logical.ErrorResponse("missing iam_request_url"), nil - } - rawUrl, err := base64.StdEncoding.DecodeString(rawUrlB64) - if err != nil { - return "", nil, nil, logical.ErrorResponse("failed to base64 decode iam_request_url"), nil - } - parsedUrl, err := url.Parse(string(rawUrl)) - if err != nil { - return "", nil, nil, logical.ErrorResponse("error parsing iam_request_url"), nil - } - if parsedUrl.RawQuery != "" { - // Should be no query parameters - return "", nil, nil, logical.ErrorResponse(logical.ErrInvalidRequest.Error()), nil - } - // TODO: There are two potentially valid cases we're not yet supporting that would - // necessitate this check being changed. First, if we support GET requests. 
- // Second if we support presigned POST requests - bodyB64 := data.Get("iam_request_body").(string) - if bodyB64 == "" { - return "", nil, nil, logical.ErrorResponse("missing iam_request_body"), nil - } - bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64) - if err != nil { - return "", nil, nil, logical.ErrorResponse("failed to base64 decode iam_request_body"), nil - } - body := string(bodyRaw) - if err = validateLoginIamRequestBody(body); err != nil { - return "", nil, nil, logical.ErrorResponse(err.Error()), nil - } - - headers := data.Get("iam_request_headers").(http.Header) - if len(headers) == 0 { - return "", nil, nil, logical.ErrorResponse("missing iam_request_headers"), nil - } - - config, err := b.lockedClientConfigEntry(ctx, req.Storage) - if err != nil { - return "", nil, nil, logical.ErrorResponse("error getting configuration"), nil - } - - endpoint := "https://sts.amazonaws.com" - - maxRetries := awsClient.DefaultRetryerMaxNumRetries - if config != nil { - if config.IAMServerIdHeaderValue != "" { - err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue) - if err != nil { - return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil - } - } - if err = config.validateAllowedSTSHeaderValues(headers); err != nil { - return "", nil, nil, logical.ErrorResponse(err.Error()), nil - } - if config.STSEndpoint != "" { - endpoint = config.STSEndpoint - } - if config.MaxRetries >= 0 { - maxRetries = config.MaxRetries - } - } - - callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers) - if err != nil { - return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil - } - - entity, err := parseIamArn(callerID.Arn) - if err != nil { - return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil - } - - roleName := data.Get("role").(string) - 
if roleName == "" { - roleName = entity.FriendlyName - } - return roleName, callerID, entity, nil, nil -} - -func (b *backend) pathLoginResolveRoleIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, _, _, resp, err := b.pathLoginIamGetRoleNameCallerIdAndEntity(ctx, req, data) - if resp != nil || err != nil { - return resp, err - } - return logical.ResolveRoleResponse(role) -} - // instanceIamRoleARN fetches the IAM role ARN associated with the given // instance profile name func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) { @@ -754,9 +554,61 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, // and a client created nonce. Client nonce is optional if 'disallow_reauthentication' // option is enabled on the registered role. func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName, identityDocParsed, errResp, err := b.pathLoginEc2GetRoleNameAndIdentityDoc(ctx, req, data) - if errResp != nil || err != nil { - return errResp, err + identityDocB64 := data.Get("identity").(string) + var identityDocBytes []byte + var err error + if identityDocB64 != "" { + identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64) + if err != nil || len(identityDocBytes) == 0 { + return logical.ErrorResponse("failed to base64 decode the instance identity document"), nil + } + } + + signatureB64 := data.Get("signature").(string) + var signatureBytes []byte + if signatureB64 != "" { + signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64) + if err != nil { + return logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil + } + } + + pkcs7B64 := data.Get("pkcs7").(string) + + // Either the pkcs7 signature of the instance identity document, or + // the identity document itself along with 
its SHA256 RSA signature + // needs to be provided. + if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) { + return logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil + } else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) { + return logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil + } + + // Verify the signature of the identity document and unmarshal it + var identityDocParsed *identityDocument + if pkcs7B64 != "" { + identityDocParsed, err = b.parseIdentityDocument(ctx, req.Storage, pkcs7B64) + if err != nil { + return nil, err + } + if identityDocParsed == nil { + return logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil + } + } else { + identityDocParsed, err = b.verifyInstanceIdentitySignature(ctx, req.Storage, identityDocBytes, signatureBytes) + if err != nil { + return nil, err + } + if identityDocParsed == nil { + return logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil + } + } + + roleName := data.Get("role").(string) + + // If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for + if roleName == "" { + roleName = identityDocParsed.AmiID } // Get the entry for the role used by the instance @@ -1324,9 +1176,92 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, _ } func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName, callerID, entity, errResp, err := b.pathLoginIamGetRoleNameCallerIdAndEntity(ctx, req, data) - if errResp != nil || err != nil { - return errResp, err + method := data.Get("iam_http_request_method").(string) + if method == "" { + return 
logical.ErrorResponse("missing iam_http_request_method"), nil + } + + // In the future, might consider supporting GET + if method != "POST" { + return logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil + } + + rawUrlB64 := data.Get("iam_request_url").(string) + if rawUrlB64 == "" { + return logical.ErrorResponse("missing iam_request_url"), nil + } + rawUrl, err := base64.StdEncoding.DecodeString(rawUrlB64) + if err != nil { + return logical.ErrorResponse("failed to base64 decode iam_request_url"), nil + } + parsedUrl, err := url.Parse(string(rawUrl)) + if err != nil { + return logical.ErrorResponse("error parsing iam_request_url"), nil + } + if parsedUrl.RawQuery != "" { + // Should be no query parameters + return logical.ErrorResponse(logical.ErrInvalidRequest.Error()), nil + } + // TODO: There are two potentially valid cases we're not yet supporting that would + // necessitate this check being changed. First, if we support GET requests. + // Second if we support presigned POST requests + bodyB64 := data.Get("iam_request_body").(string) + if bodyB64 == "" { + return logical.ErrorResponse("missing iam_request_body"), nil + } + bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64) + if err != nil { + return logical.ErrorResponse("failed to base64 decode iam_request_body"), nil + } + body := string(bodyRaw) + if err = validateLoginIamRequestBody(body); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + headers := data.Get("iam_request_headers").(http.Header) + if len(headers) == 0 { + return logical.ErrorResponse("missing iam_request_headers"), nil + } + + config, err := b.lockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return logical.ErrorResponse("error getting configuration"), nil + } + + endpoint := "https://sts.amazonaws.com" + + maxRetries := awsClient.DefaultRetryerMaxNumRetries + if config != nil { + if config.IAMServerIdHeaderValue != "" { + err = 
validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil + } + } + if err = config.validateAllowedSTSHeaderValues(headers); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + if config.STSEndpoint != "" { + endpoint = config.STSEndpoint + } + if config.MaxRetries >= 0 { + maxRetries = config.MaxRetries + } + } + + callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil + } + + entity, err := parseIamArn(callerID.Arn) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil + } + + roleName := data.Get("role").(string) + if roleName == "" { + roleName = entity.FriendlyName } roleEntry, err := b.role(ctx, req.Storage, roleName) diff --git a/builtin/credential/aws/path_login_test.go b/builtin/credential/aws/path_login_test.go index 6ffd60ed14943..82a2d9152e806 100644 --- a/builtin/credential/aws/path_login_test.go +++ b/builtin/credential/aws/path_login_test.go @@ -386,106 +386,6 @@ func TestBackend_pathLogin_IAMHeaders(t *testing.T) { } } -// TestBackend_pathLogin_IAMRoleResolution tests role resolution for an Iam login -func TestBackend_pathLogin_IAMRoleResolution(t *testing.T) { - storage := &logical.InmemStorage{} - config := logical.TestBackendConfig() - config.StorageView = storage - b, err := Backend(config) - if err != nil { - t.Fatal(err) - } - - err = b.Setup(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - // sets up a test server to stand in for STS service - ts := setupIAMTestServer() - defer ts.Close() - - clientConfigData := map[string]interface{}{ - "iam_server_id_header_value": testVaultHeaderValue, - "sts_endpoint": ts.URL, - } - clientRequest := 
&logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/client", - Storage: storage, - Data: clientConfigData, - } - _, err = b.HandleRequest(context.Background(), clientRequest) - if err != nil { - t.Fatal(err) - } - - // Configure identity. - _, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/identity", - Storage: storage, - Data: map[string]interface{}{ - "iam_alias": "role_id", - "iam_metadata": []string{ - "account_id", - "auth_type", - "canonical_arn", - "client_arn", - "client_user_id", - "inferred_aws_region", - "inferred_entity_id", - "inferred_entity_type", - }, - "ec2_alias": "role_id", - "ec2_metadata": []string{ - "account_id", - "ami_id", - "instance_id", - "region", - }, - }, - }) - if err != nil { - t.Fatal(err) - } - - // create a role entry - roleEntry := &awsRoleEntry{ - RoleID: "foo", - Version: currentRoleStorageVersion, - AuthType: iamAuthType, - } - - if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil { - t.Fatalf("failed to set entry: %s", err) - } - - // create a baseline loginData map structure, including iam_request_headers - // already base64encoded. This is the "Default" loginData used for all tests. - // Each sub test can override the map's iam_request_headers entry - loginData, err := defaultLoginData() - if err != nil { - t.Fatal(err) - } - - loginRequest := &logical.Request{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Storage: storage, - Data: loginData, - Connection: &logical.Connection{}, - } - - resp, err := b.HandleRequest(context.Background(), loginRequest) - if err != nil || resp == nil || resp.IsError() { - t.Errorf("unexpected failed role resolution:\nresp: %#v\n\nerr: %v", resp, err) - } - if resp.Data["role"] != testValidRoleName { - t.Fatalf("Role was not as expected. 
Expected %s, received %s", testValidRoleName, resp.Data["role"]) - } -} - func TestBackend_defaultAliasMetadata(t *testing.T) { storage := &logical.InmemStorage{} config := logical.TestBackendConfig() diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index 12a4c7d0f2d9f..8cca61b3de9e2 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -259,7 +259,7 @@ func (b *backend) role(ctx context.Context, s logical.Storage, roleName string) return b.roleInternal(ctx, s, roleName) } -// roleInternal does not perform locking, and rechecks the cache, going to disk if necessary +// roleInternal does not perform locking, and rechecks the cache, going to disk if necessar func (b *backend) roleInternal(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) { // Check cache again now that we have the lock roleEntryRaw, found := b.roleCache.Get(roleName) @@ -305,8 +305,7 @@ func (b *backend) roleInternal(ctx context.Context, s logical.Storage, roleName // setRole creates or updates a role in the storage. The caller must hold // the write lock. 
func (b *backend) setRole(ctx context.Context, s logical.Storage, roleName string, - roleEntry *awsRoleEntry, -) error { + roleEntry *awsRoleEntry) error { if roleName == "" { return fmt.Errorf("missing role name") } @@ -898,7 +897,7 @@ func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request return logical.ErrorResponse("ttl should be shorter than max ttl"), nil } if roleEntry.TokenPeriod > b.System().MaxLeaseTTL() { - return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", roleEntry.TokenPeriod.String(), b.System().MaxLeaseTTL().String())), nil + return logical.ErrorResponse(fmt.Sprintf("period of '%s' is greater than the backend's maximum lease TTL of '%s'", roleEntry.TokenPeriod.String(), b.System().MaxLeaseTTL().String())), nil } roleTagStr, ok := data.GetOk("role_tag") diff --git a/builtin/credential/aws/pkcs7/ber.go b/builtin/credential/aws/pkcs7/ber.go index 0b18a6c8d3612..9b736af2d7617 100644 --- a/builtin/credential/aws/pkcs7/ber.go +++ b/builtin/credential/aws/pkcs7/ber.go @@ -106,12 +106,12 @@ func lengthLength(i int) (numBytes int) { // added to 0x80. The length is encoded in big endian encoding follow after // // Examples: +// length | byte 1 | bytes n +// 0 | 0x00 | - +// 120 | 0x78 | - +// 200 | 0x81 | 0xC8 +// 500 | 0x82 | 0x01 0xF4 // -// length | byte 1 | bytes n -// 0 | 0x00 | - -// 120 | 0x78 | - -// 200 | 0x81 | 0xC8 -// 500 | 0x82 | 0x01 0xF4 func encodeLength(out *bytes.Buffer, length int) (err error) { if length >= 128 { l := lengthLength(length) diff --git a/builtin/credential/aws/pkcs7/encrypt.go b/builtin/credential/aws/pkcs7/encrypt.go index 90da67e4eed2d..6b2655708c689 100644 --- a/builtin/credential/aws/pkcs7/encrypt.go +++ b/builtin/credential/aws/pkcs7/encrypt.go @@ -256,7 +256,7 @@ func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, e // value is EncryptionAlgorithmDESCBC. 
To use a different algorithm, change the // value before calling Encrypt(). For example: // -// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM // // TODO(fullsailor): Add support for encrypting content with other algorithms func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { diff --git a/builtin/credential/aws/pkcs7/encrypt_test.go b/builtin/credential/aws/pkcs7/encrypt_test.go index 7f1bead232b9d..c64381e215517 100644 --- a/builtin/credential/aws/pkcs7/encrypt_test.go +++ b/builtin/credential/aws/pkcs7/encrypt_test.go @@ -15,6 +15,7 @@ func TestEncrypt(t *testing.T) { EncryptionAlgorithmAES256GCM, } sigalgs := []x509.SignatureAlgorithm{ + x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA512WithRSA, } diff --git a/builtin/credential/aws/pkcs7/pkcs7_test.go b/builtin/credential/aws/pkcs7/pkcs7_test.go index 7753c174b2006..1eabc9bd4eda8 100644 --- a/builtin/credential/aws/pkcs7/pkcs7_test.go +++ b/builtin/credential/aws/pkcs7/pkcs7_test.go @@ -125,6 +125,16 @@ func createTestCertificateByIssuer(name string, issuer *certKeyPair, sigAlg x509 issuerKey = *issuer.PrivateKey } switch sigAlg { + case x509.SHA1WithRSA: + priv = test1024Key + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA1WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA1 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA1 + } case x509.SHA256WithRSA: priv = test2048Key switch issuerKey.(type) { @@ -155,6 +165,19 @@ func createTestCertificateByIssuer(name string, issuer *certKeyPair, sigAlg x509 case *dsa.PrivateKey: template.SignatureAlgorithm = x509.DSAWithSHA256 } + case x509.ECDSAWithSHA1: + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA1WithRSA + case 
*ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA1 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA1 + } case x509.ECDSAWithSHA256: priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { @@ -194,6 +217,26 @@ func createTestCertificateByIssuer(name string, issuer *certKeyPair, sigAlg x509 case *dsa.PrivateKey: template.SignatureAlgorithm = x509.DSAWithSHA256 } + case x509.DSAWithSHA1: + var dsaPriv dsa.PrivateKey + params := &dsaPriv.Parameters + err = dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160) + if err != nil { + return nil, err + } + err = dsa.GenerateKey(&dsaPriv, rand.Reader) + if err != nil { + return nil, err + } + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA1WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA1 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA1 + } + priv = &dsaPriv } if isCA { template.IsCA = true diff --git a/builtin/credential/aws/pkcs7/sign.go b/builtin/credential/aws/pkcs7/sign.go index b64fcb11da47b..0db0052616c88 100644 --- a/builtin/credential/aws/pkcs7/sign.go +++ b/builtin/credential/aws/pkcs7/sign.go @@ -24,7 +24,7 @@ type SignedData struct { } // NewSignedData takes data and initializes a PKCS7 SignedData struct that is -// ready to be signed via AddSigner. The digest algorithm is set to SHA-256 by default +// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default // and can be changed by calling SetDigestAlgorithm. 
func NewSignedData(data []byte) (*SignedData, error) { content, err := asn1.Marshal(data) @@ -39,7 +39,7 @@ func NewSignedData(data []byte) (*SignedData, error) { ContentInfo: ci, Version: 1, } - return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA256}, nil + return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil } // SignerInfoConfig are optional values to include when adding a signer diff --git a/builtin/credential/aws/pkcs7/sign_test.go b/builtin/credential/aws/pkcs7/sign_test.go index 641cb0465fd05..0e513be3d7ae4 100644 --- a/builtin/credential/aws/pkcs7/sign_test.go +++ b/builtin/credential/aws/pkcs7/sign_test.go @@ -18,8 +18,10 @@ import ( func TestSign(t *testing.T) { content := []byte("Hello World") sigalgs := []x509.SignatureAlgorithm{ + x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA512WithRSA, + x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512, @@ -97,7 +99,7 @@ func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { } ioutil.WriteFile(tmpContentFile.Name(), content, 0o755) - block, _ := pem.Decode(dsaPublicCert) + block, _ := pem.Decode([]byte(dsaPublicCert)) if block == nil { t.Fatal("failed to parse certificate PEM") } @@ -127,8 +129,6 @@ func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { if err != nil { t.Fatalf("test case: cannot initialize signed data: %s", err) } - // openssl DSA only supports SHA1 for our 1024-bit DSA key, since that is all the standard officially supports - toBeSigned.digestOid = OIDDigestAlgorithmSHA1 if err := toBeSigned.SignWithoutAttr(signerCert, &priv, SignerInfoConfig{}); err != nil { t.Fatalf("Cannot add signer: %s", err) } @@ -151,7 +151,6 @@ func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { "-content", tmpContentFile.Name()) out, err := opensslCMD.CombinedOutput() if err != nil { - t.Errorf("Command: %s", opensslCMD.Args) t.Fatalf("test case: openssl command failed with %s: %s", err, out) } os.Remove(tmpSignatureFile.Name()) // clean up @@ 
-225,7 +224,7 @@ func TestUnmarshalSignedAttribute(t *testing.T) { } func TestDegenerateCertificate(t *testing.T) { - cert, err := createTestCertificate(x509.SHA256WithRSA) + cert, err := createTestCertificate(x509.SHA1WithRSA) if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/pkcs7/verify_dsa_test.go b/builtin/credential/aws/pkcs7/verify_test_dsa.go similarity index 100% rename from builtin/credential/aws/pkcs7/verify_dsa_test.go rename to builtin/credential/aws/pkcs7/verify_test_dsa.go diff --git a/builtin/credential/cert/backend.go b/builtin/credential/cert/backend.go index 89625fbb49d35..121f7bea91041 100644 --- a/builtin/credential/cert/backend.go +++ b/builtin/credential/cert/backend.go @@ -2,15 +2,9 @@ package cert import ( "context" - "crypto/x509" - "fmt" - "io" - "net/http" "strings" "sync" - "time" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -20,7 +14,7 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, if err := b.Setup(ctx, conf); err != nil { return nil, err } - if err := b.lockThenpopulateCRLs(ctx, conf.StorageView); err != nil { + if err := b.populateCRLs(ctx, conf.StorageView); err != nil { return nil, err } return b, nil @@ -42,10 +36,9 @@ func Backend() *backend { pathCerts(&b), pathCRLs(&b), }, - AuthRenew: b.pathLoginRenew, - Invalidate: b.invalidate, - BackendType: logical.TypeCredential, - PeriodicFunc: b.updateCRLs, + AuthRenew: b.pathLoginRenew, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } b.crlUpdateMutex = &sync.RWMutex{} @@ -70,40 +63,6 @@ func (b *backend) invalidate(_ context.Context, key string) { } } -func (b *backend) fetchCRL(ctx context.Context, storage logical.Storage, name string, crl *CRLInfo) error { - response, err := http.Get(crl.CDP.Url) - if err != nil { - return err - } - if response.StatusCode == http.StatusOK { - body, err := io.ReadAll(response.Body) - if err != 
nil { - return err - } - certList, err := x509.ParseCRL(body) - if err != nil { - return err - } - crl.CDP.ValidUntil = certList.TBSCertList.NextUpdate - return b.setCRL(ctx, storage, certList, name, crl.CDP) - } - return fmt.Errorf("unexpected response code %d fetching CRL from %s", response.StatusCode, crl.CDP.Url) -} - -func (b *backend) updateCRLs(ctx context.Context, req *logical.Request) error { - b.crlUpdateMutex.Lock() - defer b.crlUpdateMutex.Unlock() - var errs *multierror.Error - for name, crl := range b.crls { - if crl.CDP != nil && time.Now().After(crl.CDP.ValidUntil) { - if err := b.fetchCRL(ctx, req.Storage, name, &crl); err != nil { - errs = multierror.Append(errs, err) - } - } - } - return errs.ErrorOrNil() -} - const backendHelp = ` The "cert" credential provider allows authentication using TLS client certificates. A client connects to Vault and uses diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index 062fc156bc7a5..db400dab780a0 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -458,166 +458,6 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { } } -func TestBackend_MetadataBasedACLPolicy(t *testing.T) { - // Start cluster with cert auth method enabled - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), - CredentialBackends: map[string]logical.Factory{ - "cert": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - var err error - - // Enable the cert auth method - err = client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{ - Type: "cert", - }) - if err != nil { - t.Fatal(err) - } - - // Enable metadata in aliases - _, err = 
client.Logical().Write("auth/cert/config", map[string]interface{}{ - "enable_identity_alias_metadata": true, - }) - if err != nil { - t.Fatal(err) - } - - // Retrieve its accessor id - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } - - var accessor string - - for _, auth := range auths { - if auth.Type == "cert" { - accessor = auth.Accessor - } - } - - if accessor == "" { - t.Fatal("failed to find cert auth accessor") - } - - // Write ACL policy - err = client.Sys().PutPolicy("metadata-based", fmt.Sprintf(` -path "kv/cn/{{identity.entity.aliases.%s.metadata.common_name}}" { - capabilities = ["read"] -} -path "kv/ext/{{identity.entity.aliases.%s.metadata.2-1-1-1}}" { - capabilities = ["read"] -} -`, accessor, accessor)) - if err != nil { - t.Fatalf("err: %v", err) - } - - ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Set the trusted certificate in the backend - _, err = client.Logical().Write("auth/cert/certs/test", map[string]interface{}{ - "display_name": "test", - "policies": "metadata-based", - "certificate": string(ca), - "allowed_metadata_extensions": "2.1.1.1,1.2.3.45", - }) - if err != nil { - t.Fatal(err) - } - - // This function is a copy-paste from the NewTestCluster, with the - // modification to reconfigure the TLS on the api client with a - // specific client certificate. 
- getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { - transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = tlsConfig.Clone() - if err := http2.ConfigureTransport(transport); err != nil { - t.Fatal(err) - } - client := &http.Client{ - Transport: transport, - CheckRedirect: func(*http.Request, []*http.Request) error { - // This can of course be overridden per-test by using its own client - return fmt.Errorf("redirects not allowed in these tests") - }, - } - config := api.DefaultConfig() - if config.Error != nil { - t.Fatal(config.Error) - } - config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) - config.HttpClient = client - - // Set the client certificates - config.ConfigureTLS(&api.TLSConfig{ - CACertBytes: cluster.CACertPEM, - ClientCert: "test-fixtures/root/rootcawextcert.pem", - ClientKey: "test-fixtures/root/rootcawextkey.pem", - }) - - apiClient, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - return apiClient - } - - // Create a new api client with the desired TLS configuration - newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig) - - var secret *api.Secret - - secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{ - "name": "test", - }) - if err != nil { - t.Fatal(err) - } - if secret.Auth == nil || secret.Auth.ClientToken == "" { - t.Fatalf("expected a successful authentication") - } - - // Check paths guarded by ACL policy - newClient.SetToken(secret.Auth.ClientToken) - - _, err = newClient.Logical().Read("kv/cn/example.com") - if err != nil { - t.Fatal(err) - } - - _, err = newClient.Logical().Read("kv/cn/not.example.com") - if err == nil { - t.Fatal("expected access denied") - } - - _, err = newClient.Logical().Read("kv/ext/A UTF8String Extension") - if err != nil { - t.Fatal(err) - } - - _, err = newClient.Logical().Read("kv/ext/bar") - if err == nil { - t.Fatal("expected access denied") - } -} - func TestBackend_NonCAExpiry(t 
*testing.T) { var resp *logical.Response var err error @@ -1267,17 +1107,10 @@ func TestBackend_ext_singleCert(t *testing.T) { testAccStepLoginInvalid(t, connState), testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), testAccStepLoginInvalid(t, connState), - testAccStepReadConfig(t, config{EnableIdentityAliasMetadata: false}, connState), testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), - testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, false), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}), testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "1.2.3.45"}, false), - testAccStepLoginWithMetadata(t, connState, "web", map[string]string{}, false), - testAccStepSetConfig(t, config{EnableIdentityAliasMetadata: true}, connState), - testAccStepReadConfig(t, config{EnableIdentityAliasMetadata: true}, connState), - testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), - testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, true), - testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "1.2.3.45"}, false), - testAccStepLoginWithMetadata(t, connState, "web", map[string]string{}, true), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{}), }, }) } @@ -1682,42 +1515,6 @@ func testAccStepDeleteCRL(t *testing.T, connState tls.ConnectionState) logicalte } } -func testAccStepSetConfig(t *testing.T, conf config, connState tls.ConnectionState) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config", - ConnState: &connState, - Data: map[string]interface{}{ - "enable_identity_alias_metadata": conf.EnableIdentityAliasMetadata, - }, - } -} - -func testAccStepReadConfig(t *testing.T, conf config, connState 
tls.ConnectionState) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "config", - ConnState: &connState, - Check: func(resp *logical.Response) error { - value, ok := resp.Data["enable_identity_alias_metadata"] - if !ok { - t.Fatalf("enable_identity_alias_metadata not found in response") - } - - b, ok := value.(bool) - if !ok { - t.Fatalf("bad: expected enable_identity_alias_metadata to be a bool") - } - - if b != conf.EnableIdentityAliasMetadata { - t.Fatalf("bad: expected enable_identity_alias_metadata to be %t, got %t", conf.EnableIdentityAliasMetadata, b) - } - - return nil - }, - } -} - func testAccStepLogin(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { return testAccStepLoginWithName(t, connState, "") } @@ -1763,7 +1560,7 @@ func testAccStepLoginDefaultLease(t *testing.T, connState tls.ConnectionState) l } } -func testAccStepLoginWithMetadata(t *testing.T, connState tls.ConnectionState, certName string, metadata map[string]string, expectAliasMetadata bool) logicaltest.TestStep { +func testAccStepLoginWithMetadata(t *testing.T, connState tls.ConnectionState, certName string, metadata map[string]string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "login", @@ -1786,21 +1583,6 @@ func testAccStepLoginWithMetadata(t *testing.T, connState tls.ConnectionState, c if value != expected { t.Fatalf("expected metadata key %s to equal %s, but got: %s", key, expected, value) } - - if expectAliasMetadata { - value, ok = resp.Auth.Alias.Metadata[key] - if !ok { - t.Fatalf("missing alias metadata key: %s", key) - } - - if value != expected { - t.Fatalf("expected metadata key %s to equal %s, but got: %s", key, expected, value) - } - } else { - if len(resp.Auth.Alias.Metadata) > 0 { - t.Fatal("found alias metadata keys, but should not have any") - } - } } fn := logicaltest.TestCheckAuth([]string{"default", "foo"}) @@ -1836,8 +1618,7 @@ func 
testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState } func testAccStepListCerts( - t *testing.T, certs []string, -) []logicaltest.TestStep { + t *testing.T, certs []string) []logicaltest.TestStep { return []logicaltest.TestStep{ { Operation: logical.ListOperation, @@ -1894,8 +1675,7 @@ type allowed struct { } func testAccStepCert( - t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool, -) logicaltest.TestStep { + t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "certs/" + name, @@ -1924,8 +1704,7 @@ func testAccStepCert( } func testAccStepCertLease( - t *testing.T, name string, cert []byte, policies string, -) logicaltest.TestStep { + t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "certs/" + name, @@ -1939,8 +1718,7 @@ func testAccStepCertLease( } func testAccStepCertTTL( - t *testing.T, name string, cert []byte, policies string, -) logicaltest.TestStep { + t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "certs/" + name, @@ -1954,8 +1732,7 @@ func testAccStepCertTTL( } func testAccStepCertMaxTTL( - t *testing.T, name string, cert []byte, policies string, -) logicaltest.TestStep { + t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "certs/" + name, @@ -1970,8 +1747,7 @@ func testAccStepCertMaxTTL( } func testAccStepCertNoLease( - t *testing.T, name string, cert []byte, policies string, -) logicaltest.TestStep { + t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: 
"certs/" + name, diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go index 9cc17f3a6aafe..e73aeb3ec0963 100644 --- a/builtin/credential/cert/path_config.go +++ b/builtin/credential/cert/path_config.go @@ -17,27 +17,19 @@ func pathConfig(b *backend) *framework.Path { Default: false, Description: `If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.`, }, - "enable_identity_alias_metadata": { - Type: framework.TypeBool, - Default: false, - Description: `If set, metadata of the certificate including the metadata corresponding to allowed_metadata_extensions will be stored in the alias. Defaults to false.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, }, } } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { disableBinding := data.Get("disable_binding").(bool) - enableIdentityAliasMetadata := data.Get("enable_identity_alias_metadata").(bool) entry, err := logical.StorageEntryJSON("config", config{ - DisableBinding: disableBinding, - EnableIdentityAliasMetadata: enableIdentityAliasMetadata, + DisableBinding: disableBinding, }) if err != nil { return nil, err @@ -49,22 +41,6 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, dat return nil, nil } -func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - cfg, err := b.Config(ctx, req.Storage) - if err != nil { - return nil, err - } - - data := map[string]interface{}{ - "disable_binding": cfg.DisableBinding, - "enable_identity_alias_metadata": cfg.EnableIdentityAliasMetadata, - } - - return &logical.Response{ - Data: data, - }, nil -} - // Config returns the configuration for this backend. 
func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) { entry, err := s.Get(ctx, "config") @@ -83,6 +59,5 @@ func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error } type config struct { - DisableBinding bool `json:"disable_binding"` - EnableIdentityAliasMetadata bool `json:"enable_identity_alias_metadata"` + DisableBinding bool `json:"disable_binding"` } diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go index fc87671d825d6..e031768a517bd 100644 --- a/builtin/credential/cert/path_crls.go +++ b/builtin/credential/cert/path_crls.go @@ -3,12 +3,9 @@ package cert import ( "context" "crypto/x509" - "crypto/x509/pkix" "fmt" "math/big" - url2 "net/url" "strings" - "time" "github.com/fatih/structs" "github.com/hashicorp/vault/sdk/framework" @@ -27,15 +24,11 @@ func pathCRLs(b *backend) *framework.Path { "crl": { Type: framework.TypeString, - Description: `The public CRL that should be trusted to attest to certificates' validity statuses. + Description: `The public certificate that should be trusted. May be DER or PEM encoded. Note: the expiration time is ignored; if the CRL is no longer valid, delete it using the same name as specified here.`, }, - "url": { - Type: framework.TypeString, - Description: `The URL of a CRL distribution point. 
Only one of 'crl' or 'url' parameters should be specified.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -49,13 +42,10 @@ using the same name as specified here.`, } } -func (b *backend) lockThenpopulateCRLs(ctx context.Context, storage logical.Storage) error { +func (b *backend) populateCRLs(ctx context.Context, storage logical.Storage) error { b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() - return b.populateCRLs(ctx, storage) -} -func (b *backend) populateCRLs(ctx context.Context, storage logical.Storage) error { if b.crls != nil { return nil } @@ -139,7 +129,7 @@ func (b *backend) pathCRLDelete(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse(`"name" parameter cannot be empty`), nil } - if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { + if err := b.populateCRLs(ctx, req.Storage); err != nil { return nil, err } @@ -170,7 +160,7 @@ func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, d *fram return logical.ErrorResponse(`"name" parameter must be set`), nil } - if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { + if err := b.populateCRLs(ctx, req.Storage); err != nil { return nil, err } @@ -198,86 +188,44 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if name == "" { return logical.ErrorResponse(`"name" parameter cannot be empty`), nil } - if crlRaw, ok := d.GetOk("crl"); ok { - crl := crlRaw.(string) - certList, err := x509.ParseCRL([]byte(crl)) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to parse CRL: %v", err)), nil - } - if certList == nil { - return logical.ErrorResponse("parsed CRL is nil"), nil - } + crl := d.Get("crl").(string) - b.crlUpdateMutex.Lock() - defer b.crlUpdateMutex.Unlock() - err = b.setCRL(ctx, req.Storage, certList, name, nil) - if err != nil { - return nil, err - } - } else if urlRaw, ok := d.GetOk("url"); ok { - url := urlRaw.(string) - if url == "" { - return 
logical.ErrorResponse("empty CRL url"), nil - } - _, err := url2.Parse(url) - if err != nil { - return logical.ErrorResponse("invalid CRL url: %v", err), nil - } - - b.crlUpdateMutex.Lock() - defer b.crlUpdateMutex.Unlock() - - cdpInfo := &CDPInfo{ - Url: url, - } - err = b.fetchCRL(ctx, req.Storage, name, &CRLInfo{ - CDP: cdpInfo, - }) - if err != nil { - return nil, err - } - } else { - return logical.ErrorResponse("one of 'crl' or 'url' must be provided"), nil + certList, err := x509.ParseCRL([]byte(crl)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse CRL: %v", err)), nil + } + if certList == nil { + return logical.ErrorResponse("parsed CRL is nil"), nil } - return nil, nil -} - -func (b *backend) setCRL(ctx context.Context, storage logical.Storage, certList *pkix.CertificateList, name string, cdp *CDPInfo) error { - if err := b.populateCRLs(ctx, storage); err != nil { - return err + if err := b.populateCRLs(ctx, req.Storage); err != nil { + return nil, err } + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + crlInfo := CRLInfo{ - CDP: cdp, Serials: map[string]RevokedSerialInfo{}, } - - if certList != nil { - for _, revokedCert := range certList.TBSCertList.RevokedCertificates { - crlInfo.Serials[revokedCert.SerialNumber.String()] = RevokedSerialInfo{} - } + for _, revokedCert := range certList.TBSCertList.RevokedCertificates { + crlInfo.Serials[revokedCert.SerialNumber.String()] = RevokedSerialInfo{} } entry, err := logical.StorageEntryJSON("crls/"+name, crlInfo) if err != nil { - return err + return nil, err } - if err = storage.Put(ctx, entry); err != nil { - return err + if err = req.Storage.Put(ctx, entry); err != nil { + return nil, err } b.crls[name] = crlInfo - return err -} -type CDPInfo struct { - Url string `json:"url" structs:"url" mapstructure:"url"` - ValidUntil time.Time `json:"valid_until" structs:"valid_until" mapstructure:"valid_until"` + return nil, nil } type CRLInfo struct { - CDP *CDPInfo 
`json:"cdp" structs:"cdp" mapstructure:"cdp"` Serials map[string]RevokedSerialInfo `json:"serials" structs:"serials" mapstructure:"serials"` } @@ -289,11 +237,10 @@ Manage Certificate Revocation Lists checked during authentication. const pathCRLsHelpDesc = ` This endpoint allows you to create, read, update, and delete the Certificate -Revocation Lists checked during authentication, and/or CRL Distribution Point -URLs. +Revocation Lists checked during authentication. When any CRLs are in effect, any login will check the trust chains sent by a -client against the submitted or retrieved CRLs. Any chain containing a serial number revoked +client against the submitted CRLs. Any chain containing a serial number revoked by one or more of the CRLs causes that chain to be marked as invalid for the authentication attempt. Conversely, *any* valid chain -- that is, a chain in which none of the serials are revoked by any CRL -- allows authentication. diff --git a/builtin/credential/cert/path_crls_test.go b/builtin/credential/cert/path_crls_test.go deleted file mode 100644 index 0d36c96244970..0000000000000 --- a/builtin/credential/cert/path_crls_test.go +++ /dev/null @@ -1,194 +0,0 @@ -package cert - -import ( - "context" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "io/ioutil" - "math/big" - "net/http" - "net/http/httptest" - "net/url" - "sync" - "testing" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" -) - -func TestCRLFetch(t *testing.T) { - storage := &logical.InmemStorage{} - - lb, err := Factory(context.Background(), &logical.BackendConfig{ - System: &logical.StaticSystemView{ - DefaultLeaseTTLVal: 300 * time.Second, - MaxLeaseTTLVal: 1800 * time.Second, - }, - StorageView: storage, - }) - - require.NoError(t, err) - b := lb.(*backend) - closeChan := make(chan bool) - go func() { - t := time.NewTicker(50 * 
time.Millisecond) - for { - select { - case <-t.C: - b.PeriodicFunc(context.Background(), &logical.Request{Storage: storage}) - case <-closeChan: - break - } - } - }() - defer close(closeChan) - - if err != nil { - t.Fatalf("error: %s", err) - } - connState, err := testConnState("test-fixtures/keys/cert.pem", - "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") - require.NoError(t, err) - caPEM, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") - require.NoError(t, err) - caKeyPEM, err := ioutil.ReadFile("test-fixtures/keys/key.pem") - require.NoError(t, err) - certPEM, err := ioutil.ReadFile("test-fixtures/keys/cert.pem") - - caBundle, err := certutil.ParsePEMBundle(string(caPEM)) - require.NoError(t, err) - bundle, err := certutil.ParsePEMBundle(string(certPEM) + "\n" + string(caKeyPEM)) - require.NoError(t, err) - // Entry with one cert first - - revocationListTemplate := &x509.RevocationList{ - RevokedCertificates: []pkix.RevokedCertificate{ - { - SerialNumber: big.NewInt(1), - RevocationTime: time.Now(), - }, - }, - Number: big.NewInt(1), - ThisUpdate: time.Now(), - NextUpdate: time.Now().Add(50 * time.Millisecond), - SignatureAlgorithm: x509.SHA1WithRSA, - } - - var crlBytesLock sync.Mutex - crlBytes, err := x509.CreateRevocationList(rand.Reader, revocationListTemplate, caBundle.Certificate, bundle.PrivateKey) - require.NoError(t, err) - - var serverURL *url.URL - crlServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Host == serverURL.Host { - crlBytesLock.Lock() - w.Write(crlBytes) - crlBytesLock.Unlock() - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - serverURL, _ = url.Parse(crlServer.URL) - - req := &logical.Request{ - Connection: &logical.Connection{ - ConnState: &connState, - }, - Storage: storage, - Auth: &logical.Auth{}, - } - - fd := &framework.FieldData{ - Raw: map[string]interface{}{ - "name": "test", - "certificate": string(caPEM), - "policies": "foo,bar", - }, 
- Schema: pathCerts(b).Fields, - } - - resp, err := b.pathCertWrite(context.Background(), req, fd) - if err != nil { - t.Fatal(err) - } - - empty_login_fd := &framework.FieldData{ - Raw: map[string]interface{}{}, - Schema: pathLogin(b).Fields, - } - resp, err = b.pathLogin(context.Background(), req, empty_login_fd) - if err != nil { - t.Fatal(err) - } - if resp.IsError() { - t.Fatalf("got error: %#v", *resp) - } - - // Set a bad CRL - fd = &framework.FieldData{ - Raw: map[string]interface{}{ - "name": "testcrl", - "url": "http://wrongserver.com", - }, - Schema: pathCRLs(b).Fields, - } - resp, err = b.pathCRLWrite(context.Background(), req, fd) - if err == nil { - t.Fatal(err) - } - if resp.IsError() { - t.Fatalf("got error: %#v", *resp) - } - - // Set good CRL - fd = &framework.FieldData{ - Raw: map[string]interface{}{ - "name": "testcrl", - "url": crlServer.URL, - }, - Schema: pathCRLs(b).Fields, - } - resp, err = b.pathCRLWrite(context.Background(), req, fd) - if err != nil { - t.Fatal(err) - } - if resp.IsError() { - t.Fatalf("got error: %#v", *resp) - } - - b.crlUpdateMutex.Lock() - if len(b.crls["testcrl"].Serials) != 1 { - t.Fatalf("wrong number of certs in CRL") - } - b.crlUpdateMutex.Unlock() - - // Add a cert to the CRL, then wait to see if it gets automatically picked up - revocationListTemplate.RevokedCertificates = []pkix.RevokedCertificate{ - { - SerialNumber: big.NewInt(1), - RevocationTime: revocationListTemplate.RevokedCertificates[0].RevocationTime, - }, - { - SerialNumber: big.NewInt(2), - RevocationTime: time.Now(), - }, - } - revocationListTemplate.ThisUpdate = time.Now() - revocationListTemplate.NextUpdate = time.Now().Add(1 * time.Minute) - revocationListTemplate.Number = big.NewInt(2) - - crlBytesLock.Lock() - crlBytes, err = x509.CreateRevocationList(rand.Reader, revocationListTemplate, caBundle.Certificate, bundle.PrivateKey) - crlBytesLock.Unlock() - require.NoError(t, err) - time.Sleep(60 * time.Millisecond) - b.crlUpdateMutex.Lock() - if 
len(b.crls["testcrl"].Serials) != 2 { - t.Fatalf("wrong number of certs in CRL") - } - b.crlUpdateMutex.Unlock() -} diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index 11e63d75eaada..626ba56adbe4e 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -39,28 +39,10 @@ func pathLogin(b *backend) *framework.Path { Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathLogin, logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, - logical.ResolveRoleOperation: b.pathLoginResolveRole, }, } } -func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - var matched *ParsedCert - if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { - return nil, err - } else if resp != nil { - return resp, nil - } else { - matched = verifyResp - } - - if matched == nil { - return logical.ErrorResponse("no certificate was matched by this request"), nil - } - - return logical.ResolveRoleResponse(matched.Entry.Name) -} - func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { clientCerts := req.Connection.ConnState.PeerCertificates if len(clientCerts) == 0 { @@ -77,13 +59,7 @@ func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Requ } func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - config, err := b.Config(ctx, req.Storage) - if err != nil { - return nil, err - } - if b.crls == nil { - // Probably invalidated due to replication, but we need these to proceed if err := b.populateCRLs(ctx, req.Storage); err != nil { return nil, err } @@ -144,11 +120,6 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra Name: clientCerts[0].Subject.CommonName, }, } - - 
if config.EnableIdentityAliasMetadata { - auth.Alias.Metadata = metadata - } - matched.Entry.PopulateTokenAuth(auth) return &logical.Response{ @@ -553,7 +524,6 @@ func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool { badChain = true break } - } return badChain } diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go deleted file mode 100644 index a01ec981663f3..0000000000000 --- a/builtin/credential/cert/path_login_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package cert - -import ( - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "io/ioutil" - "math/big" - mathrand "math/rand" - "net" - "os" - "path/filepath" - "strings" - "testing" - "time" - - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - - "github.com/hashicorp/vault/sdk/logical" -) - -func TestCert_RoleResolve(t *testing.T) { - certTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "example.com", - }, - DNSNames: []string{"example.com"}, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: big.NewInt(mathrand.Int63()), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - } - - tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) - if tempDir != "" { - defer os.RemoveAll(tempDir) - } - if err != nil { - t.Fatalf("error testing connection state: %v", err) - } - ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) - if err != nil { - t.Fatalf("err: %v", err) - } - - logicaltest.Test(t, logicaltest.TestCase{ - CredentialBackend: testFactory(t), - Steps: []logicaltest.TestStep{ - testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), - testAccStepLoginWithName(t, connState, "web"), - 
testAccStepResolveRoleWithName(t, connState, "web"), - }, - }) -} - -func testAccStepResolveRoleWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Unauthenticated: true, - ConnState: &connState, - Check: func(resp *logical.Response) error { - if resp.Data["role"] != certName { - t.Fatalf("Role was not as expected. Expected %s, received %s", certName, resp.Data["role"]) - } - return nil - }, - Data: map[string]interface{}{ - "name": certName, - }, - } -} - -func TestCert_RoleResolveWithoutProvidingCertName(t *testing.T) { - certTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "example.com", - }, - DNSNames: []string{"example.com"}, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: big.NewInt(mathrand.Int63()), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - } - - tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) - if tempDir != "" { - defer os.RemoveAll(tempDir) - } - if err != nil { - t.Fatalf("error testing connection state: %v", err) - } - ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) - if err != nil { - t.Fatalf("err: %v", err) - } - - logicaltest.Test(t, logicaltest.TestCase{ - CredentialBackend: testFactory(t), - Steps: []logicaltest.TestStep{ - testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), - testAccStepLoginWithName(t, connState, "web"), - testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"), - }, - }) -} - -func testAccStepResolveRoleWithEmptyDataMap(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { - return logicaltest.TestStep{ - 
Operation: logical.ResolveRoleOperation, - Path: "login", - Unauthenticated: true, - ConnState: &connState, - Check: func(resp *logical.Response) error { - if resp.Data["role"] != certName { - t.Fatalf("Role was not as expected. Expected %s, received %s", certName, resp.Data["role"]) - } - return nil - }, - Data: map[string]interface{}{}, - } -} - -func testAccStepResolveRoleExpectRoleResolutionToFail(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ResolveRoleOperation, - Path: "login", - Unauthenticated: true, - ConnState: &connState, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if resp == nil && !resp.IsError() { - t.Fatalf("Response was not an error: resp:%#v", resp) - } - - errString, ok := resp.Data["error"].(string) - if !ok { - t.Fatal("Error not part of response.") - } - - if !strings.Contains(errString, "invalid certificate") { - t.Fatalf("Error was not due to invalid role name. 
Error: %s", errString) - } - return nil - }, - Data: map[string]interface{}{ - "name": certName, - }, - } -} - -func TestCert_RoleResolve_RoleDoesNotExist(t *testing.T) { - certTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "example.com", - }, - DNSNames: []string{"example.com"}, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: big.NewInt(mathrand.Int63()), - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - } - - tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) - if tempDir != "" { - defer os.RemoveAll(tempDir) - } - if err != nil { - t.Fatalf("error testing connection state: %v", err) - } - ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) - if err != nil { - t.Fatalf("err: %v", err) - } - - logicaltest.Test(t, logicaltest.TestCase{ - CredentialBackend: testFactory(t), - Steps: []logicaltest.TestStep{ - testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), - testAccStepLoginWithName(t, connState, "web"), - testAccStepResolveRoleExpectRoleResolutionToFail(t, connState, "notweb"), - }, - }) -} diff --git a/builtin/credential/github/backend.go b/builtin/credential/github/backend.go index 89ce37c7cd6d0..b7b16a6331daa 100644 --- a/builtin/credential/github/backend.go +++ b/builtin/credential/github/backend.go @@ -2,7 +2,6 @@ package github import ( "context" - "net/url" "github.com/google/go-github/github" cleanhttp "github.com/hashicorp/go-cleanhttp" @@ -70,14 +69,7 @@ func (b *backend) Client(token string) (*github.Client, error) { tc = oauth2.NewClient(ctx, &tokenSource{Value: token}) } - client := github.NewClient(tc) - emptyUrl, err := url.Parse("") - if err != nil { - return nil, err - } - client.UploadURL = emptyUrl - - 
return client, nil + return github.NewClient(tc), nil } // tokenSource is an oauth2.TokenSource implementation. diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index d318fe4b4c6bb..bbd9f1bfc157d 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -150,7 +150,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri } if len(ldapGroups) == 0 { errString := fmt.Sprintf( - "no LDAP groups found in groupDN %q; only policies from locally-defined groups available", + "no LDAP groups found in groupDN '%s'; only policies from locally-defined groups available", cfg.GroupDN) ldapResponse.AddWarning(errString) } diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index 74b4e18a17e33..7a9f0f0eed5e3 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -382,19 +382,19 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { } /* -* Acceptance test for LDAP Auth Method -* -* The tests here rely on a docker LDAP server: -* [https://github.com/rroemhild/docker-test-openldap] -* -* ...as well as existence of a person object, `cn=Hermes Conrad,dc=example,dc=com`, -* which is a member of a group, `cn=admin_staff,ou=people,dc=example,dc=com` -* - - Querying the server from the command line: - - $ docker run --privileged -d -p 389:389 --name ldap --rm rroemhild/test-openldap - - $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub uid=hermes - - $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub \ - 'member=cn=Hermes Conrad,ou=people,dc=planetexpress,dc=com' + * Acceptance test for LDAP Auth Method + * + * The tests here rely on a docker LDAP server: + * [https://github.com/rroemhild/docker-test-openldap] + * + * ...as well as existence of a person object, `cn=Hermes Conrad,dc=example,dc=com`, + * which is a member of a group, 
`cn=admin_staff,ou=people,dc=example,dc=com` + * + * Querying the server from the command line: + * $ docker run --privileged -d -p 389:389 --name ldap --rm rroemhild/test-openldap + * $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub uid=hermes + * $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub \ + 'member=cn=Hermes Conrad,ou=people,dc=planetexpress,dc=com' */ func factory(t *testing.T) logical.Backend { defaultLeaseTTLVal := time.Hour * 24 @@ -783,27 +783,27 @@ func TestBackend_configDefaultsAfterUpdate(t *testing.T) { cfg := resp.Data defaultGroupFilter := "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))" if cfg["groupfilter"] != defaultGroupFilter { - t.Errorf("Default mismatch: groupfilter. Expected: %q, received :%q", defaultGroupFilter, cfg["groupfilter"]) + t.Errorf("Default mismatch: groupfilter. Expected: '%s', received :'%s'", defaultGroupFilter, cfg["groupfilter"]) } defaultGroupAttr := "cn" if cfg["groupattr"] != defaultGroupAttr { - t.Errorf("Default mismatch: groupattr. Expected: %q, received :%q", defaultGroupAttr, cfg["groupattr"]) + t.Errorf("Default mismatch: groupattr. Expected: '%s', received :'%s'", defaultGroupAttr, cfg["groupattr"]) } defaultUserAttr := "cn" if cfg["userattr"] != defaultUserAttr { - t.Errorf("Default mismatch: userattr. Expected: %q, received :%q", defaultUserAttr, cfg["userattr"]) + t.Errorf("Default mismatch: userattr. Expected: '%s', received :'%s'", defaultUserAttr, cfg["userattr"]) } defaultUserFilter := "({{.UserAttr}}={{.Username}})" if cfg["userfilter"] != defaultUserFilter { - t.Errorf("Default mismatch: userfilter. Expected: %q, received :%q", defaultUserFilter, cfg["userfilter"]) + t.Errorf("Default mismatch: userfilter. Expected: '%s', received :'%s'", defaultUserFilter, cfg["userfilter"]) } defaultDenyNullBind := true if cfg["deny_null_bind"] != defaultDenyNullBind { - t.Errorf("Default mismatch: deny_null_bind. 
Expected: '%t', received :%q", defaultDenyNullBind, cfg["deny_null_bind"]) + t.Errorf("Default mismatch: deny_null_bind. Expected: '%t', received :'%s'", defaultDenyNullBind, cfg["deny_null_bind"]) } return nil diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go index de90b3b79d6e9..23d72139585eb 100644 --- a/builtin/credential/radius/backend_test.go +++ b/builtin/credential/radius/backend_test.go @@ -339,8 +339,7 @@ func testStepUserList(t *testing.T, users []string) logicaltest.TestStep { } func testStepUpdateUser( - t *testing.T, name string, policies string, -) logicaltest.TestStep { + t *testing.T, name string, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "users/" + name, diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index 83f79db9a4e1e..546b9d1554b1c 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -300,8 +300,7 @@ func testAccStepLogin(t *testing.T, user string, pass string, policies []string) } func testUserCreateOperation( - t *testing.T, name string, password string, policies string, -) logicaltest.TestStep { + t *testing.T, name string, password string, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.CreateOperation, Path: "users/" + name, @@ -313,8 +312,7 @@ func testUserCreateOperation( } func testAccStepUser( - t *testing.T, name string, password string, policies string, -) logicaltest.TestStep { + t *testing.T, name string, password string, policies string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "users/" + name, diff --git a/builtin/credential/userpass/cmd/userpass/main.go b/builtin/credential/userpass/cmd/userpass/main.go index 5ea1894d219e5..43098807a29c3 100644 --- a/builtin/credential/userpass/cmd/userpass/main.go +++ 
b/builtin/credential/userpass/cmd/userpass/main.go @@ -13,6 +13,7 @@ func main() { apiClientMeta := &api.PluginAPIClientMeta{} flags := apiClientMeta.FlagSet() flags.Parse(os.Args[1:]) + tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) diff --git a/builtin/credential/userpass/stepwise_test.go b/builtin/credential/userpass/stepwise_test.go index 90820b883d273..3748ed1cea401 100644 --- a/builtin/credential/userpass/stepwise_test.go +++ b/builtin/credential/userpass/stepwise_test.go @@ -32,8 +32,7 @@ func TestAccBackend_stepwise_UserCrud(t *testing.T) { } func testAccStepwiseUser( - t *testing.T, name string, password string, policies string, -) stepwise.Step { + t *testing.T, name string, password string, policies string) stepwise.Step { return stepwise.Step{ Operation: stepwise.UpdateOperation, Path: "users/" + name, diff --git a/builtin/logical/aws/cmd/aws/main.go b/builtin/logical/aws/cmd/aws/main.go index 74f7d97a7b868..48522bac81583 100644 --- a/builtin/logical/aws/cmd/aws/main.go +++ b/builtin/logical/aws/cmd/aws/main.go @@ -3,7 +3,7 @@ package main import ( "os" - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/aws" "github.com/hashicorp/vault/sdk/plugin" diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go index ddba67f6b8bdc..5e8ae6feb6f0b 100644 --- a/builtin/logical/aws/iam_policies_test.go +++ b/builtin/logical/aws/iam_policies_test.go @@ -207,7 +207,7 @@ func Test_combinePolicyDocuments(t *testing.T) { `{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`, }, expectedOutput: `{"Version": "2012-10-17","Statement":[{"Effect": "Allow","NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`, - expectedErr: false, + expectedErr: false, }, { description: "one blank policy", diff --git 
a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index a7c3dd84a8967..ca241b5472b68 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -562,7 +562,7 @@ func (r *awsRoleEntry) validate() error { errors = multierror.Append(errors, fmt.Errorf("user_path parameter only valid for %s credential type", iamUserCred)) } if !userPathRegex.MatchString(r.UserPath) { - errors = multierror.Append(errors, fmt.Errorf("The specified value for user_path is invalid. It must match %q regexp", userPathRegex.String())) + errors = multierror.Append(errors, fmt.Errorf("The specified value for user_path is invalid. It must match '%s' regexp", userPathRegex.String())) } } diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index 035350cdbfb1f..94e4eac286dff 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -58,7 +58,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr } if role == nil { return logical.ErrorResponse(fmt.Sprintf( - "Role %q not found", roleName)), nil + "Role '%s' not found", roleName)), nil } var ttl int64 diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index c70386d681464..7f5492ee5ae63 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -278,8 +278,7 @@ func (b *backend) secretAccessKeysCreate( ctx context.Context, s logical.Storage, displayName, policyName string, - role *awsRoleEntry, -) (*logical.Response, error) { + role *awsRoleEntry) (*logical.Response, error) { iamClient, err := b.clientIAM(ctx, s) if err != nil { return logical.ErrorResponse(err.Error()), nil diff --git a/builtin/logical/cassandra/path_creds_create.go b/builtin/logical/cassandra/path_creds_create.go index ec100b961317b..a66c4e574e382 100644 --- a/builtin/logical/cassandra/path_creds_create.go +++ 
b/builtin/logical/cassandra/path_creds_create.go @@ -50,7 +50,7 @@ func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, return nil, err } username := fmt.Sprintf("vault_%s_%s_%s_%d", name, displayName, userUUID, time.Now().Unix()) - username = strings.ReplaceAll(username, "-", "_") + username = strings.Replace(username, "-", "_", -1) password, err := uuid.GenerateUUID() if err != nil { return nil, err diff --git a/builtin/logical/cassandra/util.go b/builtin/logical/cassandra/util.go index 8257aafd787fb..c0347bc49f5d8 100644 --- a/builtin/logical/cassandra/util.go +++ b/builtin/logical/cassandra/util.go @@ -15,7 +15,7 @@ import ( // Query templates a query for us. func substQuery(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go index 7fce10e262944..3e37d1510d6e0 100644 --- a/builtin/logical/consul/backend.go +++ b/builtin/logical/consul/backend.go @@ -7,9 +7,6 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -// ReportedVersion is used to report a specific version to Vault. 
-var ReportedVersion = "" - func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { @@ -37,8 +34,7 @@ func Backend() *backend { Secrets: []*framework.Secret{ secretToken(&b), }, - BackendType: logical.TypeLogical, - RunningVersion: ReportedVersion, + BackendType: logical.TypeLogical, } return &b diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go index fa7cf647135a2..531da0f06b8e7 100644 --- a/builtin/logical/consul/backend_test.go +++ b/builtin/logical/consul/backend_test.go @@ -7,7 +7,6 @@ import ( "log" "os" "reflect" - "strings" "testing" "time" @@ -40,7 +39,7 @@ func TestBackend_Config_Access(t *testing.T) { }) } -func testBackendConfigAccess(t *testing.T, version string, autoBootstrap bool) { +func testBackendConfigAccess(t *testing.T, version string, bootstrap bool) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} b, err := Factory(context.Background(), config) @@ -48,14 +47,12 @@ func testBackendConfigAccess(t *testing.T, version string, autoBootstrap bool) { t.Fatal(err) } - cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, autoBootstrap) + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, bootstrap) defer cleanup() connData := map[string]interface{}{ "address": consulConfig.Address(), - } - if autoBootstrap || strings.HasPrefix(version, "1.3") { - connData["token"] = consulConfig.Token + "token": consulConfig.Token, } confReq := &logical.Request{ diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index e2e362fd5fa31..cd24943c2db8e 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -8,12 +8,9 @@ import ( "sync" "time" - "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/go-uuid" - 
"github.com/hashicorp/vault/helper/metricsutil" - "github.com/hashicorp/vault/internalshared/configutil" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -58,23 +55,13 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, } b.credRotationQueue = queue.New() + // Create a context with a cancel method for processing any WAL entries and + // populating the queue + initCtx := context.Background() + ictx, cancel := context.WithCancel(initCtx) + b.cancelQueue = cancel // Load queue and kickoff new periodic ticker - go b.initQueue(b.queueCtx, conf, conf.System.ReplicationState()) - - // collect metrics on number of plugin instances - var err error - b.gaugeCollectionProcess, err = metricsutil.NewGaugeCollectionProcess( - []string{"secrets", "database", "backend", "pluginInstances", "count"}, - []metricsutil.Label{}, - b.collectPluginInstanceGaugeValues, - metrics.Default(), - configutil.UsageGaugeDefaultPeriod, // TODO: add config settings for these, or add plumbing to the main config settings - configutil.MaximumGaugeCardinalityDefault, - b.logger) - if err != nil { - return nil, err - } - go b.gaugeCollectionProcess.Run() + go b.initQueue(ictx, conf, conf.System.ReplicationState()) return b, nil } @@ -116,110 +103,33 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { b.logger = conf.Logger b.connections = make(map[string]*dbPluginInstance) - b.queueCtx, b.cancelQueueCtx = context.WithCancel(context.Background()) + b.roleLocks = locksutil.CreateLocks() - return &b -} -func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]metricsutil.GaugeLabelValues, error) { - // copy the map so we can release the lock - connMapCopy := func() map[string]*dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - mapCopy := map[string]*dbPluginInstance{} - for k, v := range b.connections { 
- mapCopy[k] = v - } - return mapCopy - }() - counts := map[string]int{} - for _, v := range connMapCopy { - dbType, err := v.database.Type() - if err != nil { - // there's a chance this will already be closed since we don't hold the lock - continue - } - if _, ok := counts[dbType]; !ok { - counts[dbType] = 0 - } - counts[dbType] += 1 - } - var gauges []metricsutil.GaugeLabelValues - for k, v := range counts { - gauges = append(gauges, metricsutil.GaugeLabelValues{Labels: []metricsutil.Label{{Name: "dbType", Value: k}}, Value: float32(v)}) - } - return gauges, nil + return &b } type databaseBackend struct { - // connLock is used to synchronize access to the connections map - connLock sync.RWMutex // connections holds configured database connections by config name connections map[string]*dbPluginInstance logger log.Logger *framework.Backend - // credRotationQueue is an in-memory priority queue used to track Static Roles + sync.RWMutex + // CredRotationQueue is an in-memory priority queue used to track Static Roles // that require periodic rotation. Backends will have a PriorityQueue // initialized on setup, but only backends that are mounted by a primary // server or mounted as a local mount will perform the rotations. + // + // cancelQueue is used to remove the priority queue and terminate the + // background ticker. credRotationQueue *queue.PriorityQueue - // queueCtx is the context for the priority queue - queueCtx context.Context - // cancelQueueCtx is used to terminate the background ticker - cancelQueueCtx context.CancelFunc + cancelQueue context.CancelFunc // roleLocks is used to lock modifications to roles in the queue, to ensure // concurrent requests are not modifying the same role and possibly causing // issues with the priority queue. 
roleLocks []*locksutil.LockEntry - - // the running gauge collection process - gaugeCollectionProcess *metricsutil.GaugeCollectionProcess - gaugeCollectionProcessStop sync.Once -} - -func (b *databaseBackend) connGet(name string) *dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - return b.connections[name] -} - -func (b *databaseBackend) connPop(name string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok { - delete(b.connections, name) - } - return dbi -} - -func (b *databaseBackend) connPopIfEqual(name, id string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok && dbi.id == id { - delete(b.connections, name) - return dbi - } - return nil -} - -func (b *databaseBackend) connPut(name string, newDbi *dbPluginInstance) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi := b.connections[name] - b.connections[name] = newDbi - return dbi -} - -func (b *databaseBackend) connClear() map[string]*dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - old := b.connections - b.connections = make(map[string]*dbPluginInstance) - return old } func (b *databaseBackend) DatabaseConfig(ctx context.Context, s logical.Storage, name string) (*DatabaseConfig, error) { @@ -326,8 +236,22 @@ func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, } func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name string, config *DatabaseConfig) (*dbPluginInstance, error) { - dbi := b.connGet(name) - if dbi != nil { + b.RLock() + unlockFunc := b.RUnlock + defer func() { unlockFunc() }() + + dbi, ok := b.connections[name] + if ok { + return dbi, nil + } + + // Upgrade lock + b.RUnlock() + b.Lock() + unlockFunc = b.Unlock + + dbi, ok = b.connections[name] + if ok { return dbi, nil } @@ -336,7 +260,7 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri 
return nil, err } - dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + dbw, err := newDatabaseWrapper(ctx, config.PluginName, b.System(), b.logger) if err != nil { return nil, fmt.Errorf("unable to create database instance: %w", err) } @@ -356,34 +280,38 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri id: id, name: name, } - oldConn := b.connPut(name, dbi) - if oldConn != nil { - err := oldConn.Close() - if err != nil { - b.Logger().Warn("Error closing database connection", "error", err) - } - } + b.connections[name] = dbi return dbi, nil } +// invalidateQueue cancels any background queue loading and destroys the queue. +func (b *databaseBackend) invalidateQueue() { + // cancel context before grabbing lock to start closing any open connections + // this is safe to do without the lock since it is only written to once in initialization + // and can be canceled multiple times safely + if b.cancelQueue != nil { + b.cancelQueue() + } + b.Lock() + defer b.Unlock() + + b.credRotationQueue = nil +} + // ClearConnection closes the database connection and // removes it from the b.connections map. func (b *databaseBackend) ClearConnection(name string) error { - db := b.connPop(name) - if db != nil { - // Ignore error here since the database client is always killed - db.Close() - } - return nil + b.Lock() + defer b.Unlock() + return b.clearConnection(name) } -// ClearConnectionId closes the database connection with a specific id and -// removes it from the b.connections map. 
-func (b *databaseBackend) ClearConnectionId(name, id string) error { - db := b.connPopIfEqual(name, id) - if db != nil { +func (b *databaseBackend) clearConnection(name string) error { + db, ok := b.connections[name] + if ok { // Ignore error here since the database client is always killed db.Close() + delete(b.connections, name) } return nil } @@ -396,32 +324,33 @@ func (b *databaseBackend) CloseIfShutdown(db *dbPluginInstance, err error) { // and simply defer the unlock. Since we are attaching the instance and matching // the id in the connection map, we can safely do this. go func() { + b.Lock() + defer b.Unlock() db.Close() - // Delete the connection if it is still active. - b.connPopIfEqual(db.name, db.id) + // Ensure we are deleting the correct connection + mapDB, ok := b.connections[db.name] + if ok && db.id == mapDB.id { + delete(b.connections, db.name) + } }() } } // clean closes all connections from all database types // and cancels any rotation queue loading operation. -func (b *databaseBackend) clean(_ context.Context) { - // kill the queue and terminate the background ticker - if b.cancelQueueCtx != nil { - b.cancelQueueCtx() - } +func (b *databaseBackend) clean(ctx context.Context) { + // invalidateQueue acquires it's own lock on the backend, removes queue, and + // terminates the background ticker + b.invalidateQueue() + + b.Lock() + defer b.Unlock() - connections := b.connClear() - for _, db := range connections { - go db.Close() + for _, db := range b.connections { + db.Close() } - b.gaugeCollectionProcessStop.Do(func() { - if b.gaugeCollectionProcess != nil { - b.gaugeCollectionProcess.Stop() - } - b.gaugeCollectionProcess = nil - }) + b.connections = make(map[string]*dbPluginInstance) } const backendHelp = ` diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 191d2a5b93873..cf700c6d71fb0 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ 
-48,12 +48,12 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Postgres", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Mongo", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlas", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Postgres", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Mongo", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoMultiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlas", []string{}, "") + 
vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "") return cluster, sys } @@ -236,7 +236,6 @@ func TestBackend_config_connection(t *testing.T) { "allowed_roles": []string{"*"}, "root_credentials_rotate_statements": []string{}, "password_policy": "", - "plugin_version": "", } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -290,7 +289,6 @@ func TestBackend_config_connection(t *testing.T) { "allowed_roles": []string{"*"}, "root_credentials_rotate_statements": []string{}, "password_policy": "", - "plugin_version": "", } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -333,7 +331,6 @@ func TestBackend_config_connection(t *testing.T) { "allowed_roles": []string{"flu", "barre"}, "root_credentials_rotate_statements": []string{}, "password_policy": "", - "plugin_version": "", } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -714,7 +711,7 @@ func TestBackend_connectionCrud(t *testing.T) { // Replace connection url with templated version req.Operation = logical.UpdateOperation - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1) data["connection_url"] = connURL resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { @@ -731,7 +728,6 @@ func TestBackend_connectionCrud(t *testing.T) { "allowed_roles": []string{"plugin-role-test"}, "root_credentials_rotate_statements": []string(nil), "password_policy": "", - "plugin_version": "", } req.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), req) @@ -1271,7 +1267,7 @@ func 
TestBackend_RotateRootCredentials(t *testing.T) { cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") defer cleanup() - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1) // Configure a connection data := map[string]interface{}{ @@ -1465,91 +1461,6 @@ func TestBackend_ConnectionURL_redacted(t *testing.T) { } } -type hangingPlugin struct{} - -func (h hangingPlugin) Initialize(_ context.Context, req v5.InitializeRequest) (v5.InitializeResponse, error) { - return v5.InitializeResponse{ - Config: req.Config, - }, nil -} - -func (h hangingPlugin) NewUser(_ context.Context, _ v5.NewUserRequest) (v5.NewUserResponse, error) { - return v5.NewUserResponse{}, nil -} - -func (h hangingPlugin) UpdateUser(_ context.Context, _ v5.UpdateUserRequest) (v5.UpdateUserResponse, error) { - return v5.UpdateUserResponse{}, nil -} - -func (h hangingPlugin) DeleteUser(_ context.Context, _ v5.DeleteUserRequest) (v5.DeleteUserResponse, error) { - return v5.DeleteUserResponse{}, nil -} - -func (h hangingPlugin) Type() (string, error) { - return "hanging", nil -} - -func (h hangingPlugin) Close() error { - time.Sleep(1000 * time.Second) - return nil -} - -var _ v5.Database = (*hangingPlugin)(nil) - -func TestBackend_PluginMain_Hanging(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - v5.Serve(&hangingPlugin{}) -} - -func TestBackend_AsyncClose(t *testing.T) { - // Test that having a plugin that takes a LONG time to close will not cause the cleanup function to take - // longer than 750ms. 
- cluster, sys := getCluster(t) - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}, "") - t.Cleanup(cluster.Cleanup) - - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - config.System = sys - - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - // Configure a connection - data := map[string]interface{}{ - "connection_url": "doesn't matter", - "plugin_name": "hanging-plugin", - "allowed_roles": []string{"plugin-role-test"}, - } - req := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/hang", - Storage: config.StorageView, - Data: data, - } - _, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil { - t.Fatalf("err: %v", err) - } - timeout := time.NewTimer(750 * time.Millisecond) - done := make(chan bool) - go func() { - b.Cleanup(context.Background()) - // check that clean can be called twice safely - b.Cleanup(context.Background()) - done <- true - }() - select { - case <-timeout.C: - t.Error("Hanging plugin caused Close() to take longer than 750ms") - case <-done: - } -} - func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { t.Helper() var d struct { diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index bea9e30ec7a58..9e86874c9d7c8 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -110,7 +110,7 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { cores := cluster.Cores sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "TestPlugin_GRPC_Main", []string{}, "") 
return cluster, sys } @@ -139,7 +139,7 @@ func TestPlugin_Init(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - dbRaw, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + dbRaw, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -163,7 +163,7 @@ func TestPlugin_CreateUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -203,7 +203,7 @@ func TestPlugin_RenewUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -237,7 +237,7 @@ func TestPlugin_RevokeUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + db, err := dbplugin.PluginFactory(namespace.RootContext(nil), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go index c0522bf9fb247..ac5a623d73635 100644 --- a/builtin/logical/database/path_config_connection.go +++ b/builtin/logical/database/path_config_connection.go @@ -5,16 +5,12 @@ import ( "errors" "fmt" "net/url" - "sort" "github.com/fatih/structs" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/go-version" v5 
"github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -26,8 +22,7 @@ var ( // DatabaseConfig is used by the Factory function to configure a Database // object. type DatabaseConfig struct { - PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` - PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` // ConnectionDetails stores the database specific connection settings needed // by each database type. ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` @@ -115,11 +110,6 @@ func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { that plugin type.`, }, - "plugin_version": { - Type: framework.TypeString, - Description: `The version of the plugin to use.`, - }, - "verify_connection": { Type: framework.TypeBool, Default: true, @@ -291,48 +281,6 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { return logical.ErrorResponse(respErrEmptyPluginName), nil } - if pluginVersionRaw, ok := data.GetOk("plugin_version"); ok { - config.PluginVersion = pluginVersionRaw.(string) - } - - unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase) - switch { - case config.PluginVersion != "": - semanticVersion, err := version.NewVersion(config.PluginVersion) - if err != nil { - return logical.ErrorResponse("version %q is not a valid semantic version: %s", config.PluginVersion, err), nil - } - - // Canonicalize the version. 
- config.PluginVersion = "v" + semanticVersion.String() - case err == nil && !unversionedPlugin.Builtin: - // We'll select the unversioned plugin that's been registered. - case req.Operation == logical.CreateOperation: - // No version provided and no unversioned plugin of that name available. - // Pin to the current latest version if any versioned plugins are registered. - plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) - if err != nil { - return nil, err - } - - var versionedCandidates []pluginutil.VersionedPlugin - for _, plugin := range plugins { - if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { - versionedCandidates = append(versionedCandidates, plugin) - } - } - - if len(versionedCandidates) != 0 { - // Sort in reverse order. - sort.SliceStable(versionedCandidates, func(i, j int) bool { - return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) - }) - - config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() - b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) - } - } - if allowedRolesRaw, ok := data.GetOk("allowed_roles"); ok { config.AllowedRoles = allowedRolesRaw.([]string) } else if req.Operation == logical.CreateOperation { @@ -353,7 +301,6 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { // ConnectionDetails. delete(data.Raw, "name") delete(data.Raw, "plugin_name") - delete(data.Raw, "plugin_version") delete(data.Raw, "allowed_roles") delete(data.Raw, "verify_connection") delete(data.Raw, "root_rotation_statements") @@ -379,7 +326,7 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { } // Create a database plugin and initialize it. 
- dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + dbw, err := newDatabaseWrapper(ctx, config.PluginName, b.System(), b.logger) if err != nil { return logical.ErrorResponse("error creating database object: %s", err), nil } @@ -397,14 +344,16 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { b.Logger().Debug("created database object", "name", name, "plugin_name", config.PluginName) + b.Lock() + defer b.Unlock() + // Close and remove the old connection - oldConn := b.connPut(name, &dbPluginInstance{ + b.clearConnection(name) + + b.connections[name] = &dbPluginInstance{ database: dbw, name: name, id: id, - }) - if oldConn != nil { - oldConn.Close() } err = storeConfig(ctx, req.Storage, name, config) diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index 4d05393b86acc..2b197d59e780f 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -78,19 +78,22 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF return nil, err } + // Take out the backend lock since we are swapping out the connection + b.Lock() + defer b.Unlock() + // Take the write lock on the instance dbi.Lock() - defer func() { - dbi.Unlock() - // Even on error, still remove the connection - b.ClearConnectionId(name, dbi.id) - }() + defer dbi.Unlock() + defer func() { // Close the plugin dbi.closed = true if err := dbi.database.Close(); err != nil { b.Logger().Error("error closing the database plugin connection", "err", err) } + // Even on error, still remove the connection + delete(b.connections, name) }() generator, err := newPasswordGenerator(nil) diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go index dc061ae99a3fb..48e3744cc88cc 100644 --- a/builtin/logical/database/rollback_test.go +++ 
b/builtin/logical/database/rollback_test.go @@ -20,9 +20,9 @@ const ( // Tests that the WAL rollback function rolls back the database password. // The database password should be rolled back when: -// - A WAL entry exists -// - Password has been altered on the database -// - Password has not been updated in storage +// - A WAL entry exists +// - Password has been altered on the database +// - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() @@ -44,7 +44,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") defer cleanup() - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1) // Configure a connection to the database data := map[string]interface{}{ @@ -163,9 +163,9 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { // Tests that the WAL rollback function does not roll back the database password. 
// The database password should not be rolled back when: -// - A WAL entry exists -// - Password has not been altered on the database -// - Password has not been updated in storage +// - A WAL entry exists +// - Password has not been altered on the database +// - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() @@ -183,7 +183,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") defer cleanup() - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1) // Configure a connection to the database data := map[string]interface{}{ @@ -267,9 +267,9 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { // Tests that the WAL rollback function does not roll back the database password. 
// The database password should not be rolled back when: -// - A WAL entry exists -// - Password has been altered on the database -// - Password has been updated in storage +// - A WAL entry exists +// - Password has been altered on the database +// - Password has been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() @@ -291,7 +291,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") defer cleanup() - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1) // Configure a connection to the database data := map[string]interface{}{ diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go index 5ae2756f279cd..53464b361394e 100644 --- a/builtin/logical/database/rotation.go +++ b/builtin/logical/database/rotation.go @@ -312,12 +312,12 @@ type setStaticAccountOutput struct { // - verifies role exists and is in the allowed roles list // - loads an existing WAL entry if WALID input is given, otherwise creates a // new WAL entry -// - gets a database connection -// - accepts an input credential, otherwise generates a new one for -// the role's credential type -// - sets new credential for the static account -// - uses WAL for ensuring new credentials are not lost if storage to Vault fails, -// resulting in a partial failure. +// - gets a database connection +// - accepts an input credential, otherwise generates a new one for +// the role's credential type +// - sets new credential for the static account +// - uses WAL for ensuring new credentials are not lost if storage to Vault fails, +// resulting in a partial failure. // // This method does not perform any operations on the priority queue. 
Those // tasks must be handled outside of this method. @@ -642,11 +642,14 @@ func (b *databaseBackend) loadStaticWALs(ctx context.Context, s logical.Storage) // actually available. This is needed because both runTicker and initQueue // operate in go-routines, and could be accessing the queue concurrently func (b *databaseBackend) pushItem(item *queue.Item) error { - select { - case <-b.queueCtx.Done(): - default: + b.RLock() + unlockFunc := b.RUnlock + defer func() { unlockFunc() }() + + if b.credRotationQueue != nil { return b.credRotationQueue.Push(item) } + b.Logger().Warn("no queue found during push item") return nil } @@ -655,9 +658,9 @@ func (b *databaseBackend) pushItem(item *queue.Item) error { // actually available. This is needed because both runTicker and initQueue // operate in go-routines, and could be accessing the queue concurrently func (b *databaseBackend) popFromRotationQueue() (*queue.Item, error) { - select { - case <-b.queueCtx.Done(): - default: + b.RLock() + defer b.RUnlock() + if b.credRotationQueue != nil { return b.credRotationQueue.Pop() } return nil, queue.ErrEmpty @@ -667,9 +670,9 @@ func (b *databaseBackend) popFromRotationQueue() (*queue.Item, error) { // actually available. 
This is needed because both runTicker and initQueue // operate in go-routines, and could be accessing the queue concurrently func (b *databaseBackend) popFromRotationQueueByKey(name string) (*queue.Item, error) { - select { - case <-b.queueCtx.Done(): - default: + b.RLock() + defer b.RUnlock() + if b.credRotationQueue != nil { item, err := b.credRotationQueue.PopByKey(name) if err != nil { return nil, err diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go index 1fdbba55f1d6d..e58f2246d4463 100644 --- a/builtin/logical/database/rotation_test.go +++ b/builtin/logical/database/rotation_test.go @@ -1379,7 +1379,6 @@ func setupMockDB(b *databaseBackend) *mockNewDatabase { mockDB := &mockNewDatabase{} mockDB.On("Initialize", mock.Anything, mock.Anything).Return(v5.InitializeResponse{}, nil) mockDB.On("Close").Return(nil) - mockDB.On("Type").Return("mock", nil) dbw := databaseVersionWrapper{ v5: mockDB, } diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index 734c36ca6bf1c..3dc6c69a5966c 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -9,7 +9,6 @@ import ( v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -19,12 +18,10 @@ type databaseVersionWrapper struct { v5 v5.Database } -var _ logical.PluginVersioner = databaseVersionWrapper{} - // newDatabaseWrapper figures out which version of the database the pluginName is referring to and returns a wrapper object // that can be used to make operations on the underlying database plugin. 
-func newDatabaseWrapper(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (dbw databaseVersionWrapper, err error) { - newDB, err := v5.PluginFactoryVersion(ctx, pluginName, pluginVersion, sys, logger) +func newDatabaseWrapper(ctx context.Context, pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (dbw databaseVersionWrapper, err error) { + newDB, err := v5.PluginFactory(ctx, pluginName, sys, logger) if err == nil { dbw = databaseVersionWrapper{ v5: newDB, @@ -35,7 +32,7 @@ func newDatabaseWrapper(ctx context.Context, pluginName string, pluginVersion st merr := &multierror.Error{} merr = multierror.Append(merr, err) - legacyDB, err := v4.PluginFactoryVersion(ctx, pluginName, pluginVersion, sys, logger) + legacyDB, err := v4.PluginFactory(ctx, pluginName, sys, logger) if err == nil { dbw = databaseVersionWrapper{ v4: legacyDB, @@ -230,21 +227,6 @@ func (d databaseVersionWrapper) Close() error { return d.v4.Close() } -func (d databaseVersionWrapper) PluginVersion() logical.PluginVersion { - // v5 Database - if d.isV5() { - if versioner, ok := d.v5.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - } - - // v4 Database - if versioner, ok := d.v4.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - func (d databaseVersionWrapper) isV5() bool { return d.v5 != nil } diff --git a/builtin/logical/database/versioning_large_test.go b/builtin/logical/database/versioning_large_test.go index a9f7efde62a68..723f9dd808def 100644 --- a/builtin/logical/database/versioning_large_test.go +++ b/builtin/logical/database/versioning_large_test.go @@ -22,9 +22,9 @@ func TestPlugin_lifecycle(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", []string{}, "") - 
vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MockV4", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "TestBackend_PluginMain_MockV6Multiplexed", []string{}, "") config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -218,216 +218,6 @@ func TestPlugin_lifecycle(t *testing.T) { } } -func TestPlugin_VersionSelection(t *testing.T) { - cluster, sys := getCluster(t) - defer cluster.Cleanup() - - for _, version := range []string{"v11.0.0", "v11.0.1-rc1", "v2.0.0"} { - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}, "") - } - - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - config.System = sys - lb, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - b, ok := lb.(*databaseBackend) - if !ok { - t.Fatal("could not convert to database backend") - } - defer b.Cleanup(context.Background()) - - test := func(t *testing.T, selectVersion, expectedVersion string) func(t *testing.T) { - return func(t *testing.T) { - req := &logical.Request{ - Operation: logical.CreateOperation, - Path: "config/db", - Storage: config.StorageView, - Data: map[string]interface{}{ - "connection_url": "sample_connection_url", - "plugin_name": 
"mock-v5-database-plugin", - "plugin_version": selectVersion, - "verify_connection": true, - "allowed_roles": []string{"*"}, - "name": "mockv5", - "username": "mockv5-user", - "password": "mysecurepassword", - }, - } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - resp, err := b.HandleRequest(ctx, req) - assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - assertNoRespData(t, resp) - - defer func() { - _, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.DeleteOperation, - Path: "config/db", - Storage: config.StorageView, - }) - if err != nil { - t.Fatal(err) - } - }() - - req = &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/db", - Storage: config.StorageView, - } - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - resp, err = b.HandleRequest(ctx, req) - assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - if resp.Data["plugin_version"].(string) != expectedVersion { - t.Fatalf("Expected version %q but got %q", expectedVersion, resp.Data["plugin_version"].(string)) - } - } - } - - for name, tc := range map[string]struct { - selectVersion string - expectedVersion string - }{ - "no version specified, selects latest in the absence of unversioned plugins": { - selectVersion: "", - expectedVersion: "v11.0.1-rc1", - }, - "specific version selected": { - selectVersion: "11.0.0", - expectedVersion: "v11.0.0", - }, - } { - t.Run(name, test(t, tc.selectVersion, tc.expectedVersion)) - } - - // Register a newer version of the plugin, and ensure that's the new default version selected. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}, "") - t.Run("no version specified, new latest version selected", test(t, "", "v11.0.1")) - - // Register an unversioned plugin and ensure that is now selected when no version is specified. 
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") - for name, tc := range map[string]struct { - selectVersion string - expectedVersion string - }{ - "no version specified, selects unversioned": { - selectVersion: "", - expectedVersion: "", - }, - "specific version selected": { - selectVersion: "v2.0.0", - expectedVersion: "v2.0.0", - }, - } { - t.Run(name, test(t, tc.selectVersion, tc.expectedVersion)) - } -} - -func TestPlugin_VersionMustBeExplicitlyUpgraded(t *testing.T) { - cluster, sys := getCluster(t) - defer cluster.Cleanup() - - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - config.System = sys - lb, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - b, ok := lb.(*databaseBackend) - if !ok { - t.Fatal("could not convert to database backend") - } - defer b.Cleanup(context.Background()) - - configData := func(extraData ...string) map[string]interface{} { - data := map[string]interface{}{ - "connection_url": "sample_connection_url", - "plugin_name": "mysql-database-plugin", - "verify_connection": false, - "allowed_roles": []string{"*"}, - "username": "mockv5-user", - "password": "mysecurepassword", - } - if len(extraData)%2 != 0 { - t.Fatal("Expected an even number of args in extraData") - } - for i := 0; i < len(extraData); i += 2 { - data[extraData[i]] = extraData[i+1] - } - return data - } - - readVersion := func() string { - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/db", - Storage: config.StorageView, - }) - assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - return resp.Data["plugin_version"].(string) - } - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.CreateOperation, - Path: "config/db", - Storage: config.StorageView, - Data: configData(), - }) - 
assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - assertNoRespData(t, resp) - - version := readVersion() - expectedVersion := "" - if version != expectedVersion { - t.Fatalf("Expected version %q but got %q", expectedVersion, version) - } - - // Register versioned plugin, and check that a new write to existing config doesn't upgrade the plugin implicitly. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}, "") - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/db", - Storage: config.StorageView, - Data: configData(), - }) - assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - assertNoRespData(t, resp) - - version = readVersion() - if version != expectedVersion { - t.Fatalf("Expected version %q but got %q", expectedVersion, version) - } - - // Now explicitly upgrade. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/db", - Storage: config.StorageView, - Data: configData("plugin_version", "1.0.0"), - }) - assertErrIsNil(t, err) - assertRespHasNoErr(t, resp) - assertNoRespData(t, resp) - - version = readVersion() - expectedVersion = "v1.0.0" - if version != expectedVersion { - t.Fatalf("Expected version %q but got %q", expectedVersion, version) - } -} - func cleanup(t *testing.T, b *databaseBackend, reqs []*logical.Request) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -500,7 +290,7 @@ func assertStringPrefix(expectedPrefix string) stringAssertion { return func(t *testing.T, str string) { t.Helper() if !strings.HasPrefix(str, expectedPrefix) { - t.Fatalf("Missing prefix %q: Actual: %q", expectedPrefix, str) + t.Fatalf("Missing prefix '%s': Actual: '%s'", expectedPrefix, str) } } } @@ -509,7 +299,7 @@ func assertStringRegex(expectedRegex string) stringAssertion { re := 
regexp.MustCompile(expectedRegex) return func(t *testing.T, str string) { if !re.MatchString(str) { - t.Fatalf("Actual: %q did not match regexp %q", str, expectedRegex) + t.Fatalf("Actual: '%s' did not match regexp '%s'", str, expectedRegex) } } } diff --git a/builtin/logical/mssql/util.go b/builtin/logical/mssql/util.go index 17c46c6813bb8..362cbd36ad1f9 100644 --- a/builtin/logical/mssql/util.go +++ b/builtin/logical/mssql/util.go @@ -21,7 +21,7 @@ func SplitSQL(sql string) []string { // Query templates a query for us. func Query(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/builtin/logical/mysql/secret_creds.go b/builtin/logical/mysql/secret_creds.go index 454edbaa927e1..5de5f3c1783af 100644 --- a/builtin/logical/mysql/secret_creds.go +++ b/builtin/logical/mysql/secret_creds.go @@ -18,7 +18,7 @@ const SecretCredsType = "creds" // grants, at least we ensure that the open connection is useless. Dropping the // user will only affect the next connection. 
const defaultRevocationSQL = ` -REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; DROP USER '{{name}}'@'%' ` @@ -119,7 +119,7 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d // This is not a prepared statement because not all commands are supported // 1295: This command is not supported in the prepared statement protocol yet // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ - query = strings.ReplaceAll(query, "{{name}}", username) + query = strings.Replace(query, "{{name}}", username, -1) _, err = tx.Exec(query) if err != nil { return nil, err diff --git a/builtin/logical/mysql/util.go b/builtin/logical/mysql/util.go index 4ba7c650c208d..313264f905cd9 100644 --- a/builtin/logical/mysql/util.go +++ b/builtin/logical/mysql/util.go @@ -8,7 +8,7 @@ import ( // Query templates a query for us. func Query(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 8452c2b019e41..985ef4c9b09a9 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -184,8 +184,6 @@ func TestBackend_config_Bootstrap(t *testing.T) { expected := map[string]interface{}{ "address": connData["address"].(string), "max_token_name_length": 0, - "ca_cert": "", - "client_cert": "", } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) @@ -244,59 +242,6 @@ func TestBackend_config_access(t *testing.T) { expected := map[string]interface{}{ "address": connData["address"].(string), "max_token_name_length": 0, - "ca_cert": "", - "client_cert": "", - } - if !reflect.DeepEqual(expected, resp.Data) { - t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, 
resp.Data) - } - if resp.Data["token"] != nil { - t.Fatalf("token should not be set in the response") - } -} - -func TestBackend_config_access_with_certs(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, svccfg := prepareTestContainer(t, true) - defer cleanup() - - connData := map[string]interface{}{ - "address": svccfg.URL().String(), - "token": svccfg.Token, - "ca_cert": caCert, - "client_cert": clientCert, - "client_key": clientKey, - } - - confReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/access", - Storage: config.StorageView, - Data: connData, - } - - resp, err := b.HandleRequest(context.Background(), confReq) - if err != nil || (resp != nil && resp.IsError()) || resp != nil { - t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) - } - - confReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), confReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) - } - - expected := map[string]interface{}{ - "address": connData["address"].(string), - "max_token_name_length": 0, - "ca_cert": caCert, - "client_cert": clientCert, } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) @@ -504,8 +449,6 @@ func TestBackend_max_token_name_length(t *testing.T) { expected := map[string]interface{}{ "address": svccfg.URL().String(), "max_token_name_length": tc.tokenLength, - "ca_cert": "", - "client_cert": "", } expectedMaxTokenNameLength := maxTokenNameLength @@ -610,96 +553,3 @@ func TestBackend_max_token_name_length(t *testing.T) { }) } } - -const caCert = `-----BEGIN CERTIFICATE----- -MIIF7zCCA9egAwIBAgIINVVQic4bju8wDQYJKoZIhvcNAQELBQAwaDELMAkGA1UE 
-BhMCVVMxFDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2 -NDA5ODI5MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t -MB4XDTIyMDYwMjIxMTgxN1oXDTIzMDcwNTIxMTgxN1owaDELMAkGA1UEBhMCVVMx -FDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2NDA5ODI5 -MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29tMIICIjAN -BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA35VilgfqMUKhword7wORXRFyPbpz -8uqO7eRaylMnkAkbk5eoQB/iYfXjJ6ZBs5mJGQVz5ZNvh9EzZsk1J6wqYgbwVKUx -fh4kvW6sXtDirtb4ZQAK7OTLEoapUQGnGcvm+aEYfvC1sTBl4fbex7yyN5FYMJTM -TAUumhdq2pwujaj2xkN9DwZa89Tk7tbj9HE9DTRji7bnciEtrmTAOIOfOrT/1l3x -YW1BwYXpQ0TamJ58pC/iNgEp5FAxKt9d3RggesMA7pvG/f8fNgsa/Tku/PeEXNPA -+Yx4CcAipujmqpBKiKwJ6TOzp80m2zrZ7Da4Av5vVS5GsNJxhFYD1h8hU1ptK9BS -2CaTwBpV421C9BfEmtSAksGDIWYujfiHb6XNaQrt8Hu85GBuPUudVn0lpoXLn2xD -rGK8WEK2gWZ4eez3ZDLbpLui6c1m7AVlMtj374s+LHcD7JIxY475Na7pXmEWReqM -RUyCEq1spOOn70fOdhphhmpY6DoklOTOriPawCLNmkPWRnhrIwqyP1gse9YMqQ2n -LhWUkv/08m/0pb4e5ijVhsZNzv+1PXPWCk968nzt0BMDgJT+0ZiXsaU7FILXuo7Y -Ijgrj7dpXWx2MBdMGPFQdveog7Pa80Yb7r4ERW0DL78TxYC6m/S1p14PHwZpDZzQ -LrPrBcpI5XzI7osCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAqQwDAYDVR0TBAUw -AwEB/zA0BgNVHR4ELTAroCkwG4IZeHBzMTUubG9jYWwuY2lwaGVyYm95LmNvbTAK -hwh/AAAB/wAAADAkBgNVHREEHTAbghl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t -MB0GA1UdDgQWBBR3bHgDp5RpzerMKRkaGDFN/ZeImjANBgkqhkiG9w0BAQsFAAOC -AgEArkuDYYWYHYxIoTeZkQz5L1y0H27ZWPJx5jBBuktPonDLQxBGAwkl6NZbJGLU -v+usII+eyjPKIgjhCiTXJAmeIngwWoN3PHOLMIPe9axuNt6qVoP4dQtzfpPR3buK -CWj9i3H0ixK73klk7QWZiBUDinYfEMSNRpU3G7NsqmqCXD4s5gB+8y9c7+zIiJyN -IaJBWpzI4eQBi/4cBhtM7Xa+CMB/8whhWYR6H+GXGZdNcP5f7bwneMstWKceTadk -IEzFucJHDySpEkIA2A9t33pV54FmEp+JVwvxAH4FABCnjPmhg0j1IonWV5pySWpG -hhEZpnRRH1XfpTA5i6dlyUA5DJjL8X1lYrgOK+LaoR52mQh5JBsMoVHFzN50DiMA -RTsbq4Qzozf23hU1BqW4NOzPTukgSGEcbT/DhXKPPPLL8JD0rPelJPq76X3TJjgZ -C9uMnZaDnxjppDXp5oBIXqC05FDxJ5sSODNOpKGyuzOU2qQLMau33yYOgaSAttBk -r29+LNFJ+0QzMuPjYXPznpxbsI+lrlZ3F2tDGGs8+JVceC1YX+cBEsEOiqNGTIip -/DY3b9gu5oiTwhcFyQW8+WFsirRS/g5t+M40WLKVPdK09z96krFXQMkL6a7LHLY1 
-n9ivwj+sTG1XmJYXp8naLg4wdzIUf2fJxaFNI5Yq4elZ8sY= ------END CERTIFICATE-----` - -const clientCert = `-----BEGIN CERTIFICATE----- -MIIEsDCCApigAwIBAgIIRY1JBRIynFYwDQYJKoZIhvcNAQELBQAwaDELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2 -NDA5ODI5MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t -MB4XDTIyMDYwMjIxMTgxOFoXDTIzMDcwNTIxMTgxOFowRzELMAkGA1UEBhMCVVMx -FDASBgNVBAoMC1Vuc3BlY2lmaWVkMSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBo -ZXJib3kuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs+XYhsW2 -vTwN7gY3xMxgbNN8d3aoeqCswOp05BBf0Vgv3febahm422ubXXd5Mg2UGiU7sJVe -4tUpDeupVVRX5Qr/hpiXgEyfRDAAAJKqrl65KSS62TCbT/eJZ0ah25HV1evI4uM2 -0kl5QWhtQjDyaVlTS38YFqXXQvpOuU5DG6UbKnpMcpsCPTyUKEJvJ95ZLcz0HJ8I -kIHrnX0Lt0pOhkllj5Nk4cXhU8CFk8IGNz7SVAycrUsffAUMNNEbrIOIfOTPHR1c -q3X9hO4/5pt80uIDMFwwumoA7nQR0AhlKkw9SskCIzJhKwKwssQY7fmovNG0fOEd -/+vSHK7OsYW+gwIDAQABo38wfTAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYI -KwYBBQUHAwIwCQYDVR0TBAIwADAqBgNVHREEIzAhghl4cHMxNS5sb2NhbC5jaXBo -ZXJib3kuY29thwR/AAABMB8GA1UdIwQYMBaAFHdseAOnlGnN6swpGRoYMU39l4ia -MA0GCSqGSIb3DQEBCwUAA4ICAQBUSP4ZJglCCrYkM5Le7McdvfkM5uYv1aQn0sM4 -gbyDEWO0fnv50vLpD3y4ckgHLoD52pAZ0hN8a7rwAUae21GA6DvEchSH5x/yvJiS -7FBlq39sAafe03ZlzDErNYJRkLcnPAqG74lJ1SSsMcs9gCPHM8R7HtNnhAga06L7 -K8/G43dsGZCmEb+xcX2B9McCt8jBG6TJPTGafb3BJ0JTmR/tHdoLFIiNwI+qzd2U -lMnGlkIApULX8tmIMsWO0rjdiFkPWGcmfn9ChC0iDpQOAcKSDBcZlWrDNpzKk0mK -l0TbE6cxcmCUUpiwaXFrbkwVWQw4W0c4b3sWFtWifFbiR1qZ/OT2Y2sHbkbxwvPl -PjjXMDBAdRRwtNcTP1E55I5zvwzzBxUpxOob0miorhTJrZR9So0rgv7Roce4ED6M -WETYa/mGhe+Q7gBQygIVoryfQLgGBsHC+7V4RDvYTazwZkz9nLQxHLI/TAZU5ofM -WqdoUkMd68rxTTEUoMfGbftxjKA0raxGcO7/PjLR3O743EwCqeqYJ7OKWgGRLnui -kIKNUJlZ9umURUFzL++Bx4Pr95jWXb2WYqYYQxhDz0oR5q5smnFm5+/1/MLDMvDU -TrgBK6pey4QF33B/I55H1+7tGdv85Q57Z8UrNi/IQxR2sFlsOTeCwStpBQ56sdZk -Wi4+cQ== ------END CERTIFICATE-----` - -const clientKey = `-----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz5diGxba9PA3u -BjfEzGBs03x3dqh6oKzA6nTkEF/RWC/d95tqGbjba5tdd3kyDZQaJTuwlV7i1SkN 
-66lVVFflCv+GmJeATJ9EMAAAkqquXrkpJLrZMJtP94lnRqHbkdXV68ji4zbSSXlB -aG1CMPJpWVNLfxgWpddC+k65TkMbpRsqekxymwI9PJQoQm8n3lktzPQcnwiQgeud -fQu3Sk6GSWWPk2ThxeFTwIWTwgY3PtJUDJytSx98BQw00Rusg4h85M8dHVyrdf2E -7j/mm3zS4gMwXDC6agDudBHQCGUqTD1KyQIjMmErArCyxBjt+ai80bR84R3/69Ic -rs6xhb6DAgMBAAECggEAPBcja2kxcCZWNNKo4DiwYMmHwtPE1SlEazAlmWSKzP+b -BZbGt/sdj1VzURYuSnTUqqMTPBm41yYCj57PMix5K42v6sKfoIB3lqw94/MZxiLn -0IFvVErzJhP2NqQWPqSI++rFcFwbHMTkFuAN1tVIs73dn9M1NaNxsvKvRyCIM/wz -5YQSDyTkdW4jQM2RvUFOoqwmeyAlQoBRMgQ4bHfLHxmPEjFgw1MAmmG8bJdkupin -MVzhZyKj4Fh80Xa2MU4KokijjG41hmYbg/sjNHaHJFDA92Rwq13dhWytrauJDxa/ -3yj8pHWc23Y3hXvRAf/cibDVzXmmLj49W1i06KuUCQKBgQDj5yF/DJV0IOkhfbol -+f5AGH4ZrEXA/JwA5SxHU+aKhUuPEqK/LeUWqiy3szFjOz2JOnCC0LMN42nsmMyK -sdQEKHp2SPd2wCxsAKZAuxrEi6yBt1mEPFFU5yzvZbdMqYChKJjm9fbRHtuc63s8 -PyVw67Ii9o4ij+PxfTobIs18xwKBgQDKE59w3uUDt2uoqNC8x4m5onL2p2vtcTHC -CxU57mu1+9CRM8N2BEp2VI5JaXjqt6W4u9ISrmOqmsPgTwosAquKpA/nu3bVvR9g -WlN9dh2Xgza0/AFaA9CB++ier8RJq5xFlcasMUmgkhYt3zgKNgRDfjfREWM0yamm -P++hAYRcZQKBgHEuYQk6k6J3ka/rQ54GmEj2oPFZB88+5K7hIWtO9IhIiGzGYYK2 -ZTYrT0fvuxA/5GCZYDTnNnUoQnuYqsQaamOiQqcpt5QG/kiozegJw9JmV0aYauFs -HyweHsfJaQ2uhE4E3mKdNnVGcORuYeZaqdp5gx8v+QibEyXj/g5p60kTAoGBALKp -TMOHXmW9yqKwtvThWoRU+13WQlcJSFvuXpL8mCCrBgkLAhqaypb6RV7ksLKdMhk1 -fhNkOdxBv0LXvv+QUMhgK2vP084/yrjuw3hecOVfboPvduZ2DuiNp2p9rocQAjeH -p8LgRN+Bqbhe7fYhMf3WX1UqEVM/pQ3G43+vjq39AoGAOyD2/hFSIx6BMddUNTHG -BEsMUc/DHYslZebbF1zAWnkKdTt+URhtHAFB2tYRDgkZfwW+wr/w12dJTIkX965o -HO7tI4FgpU9b0i8FTuwYkBfjwp2j0Xd2/VBR8Qpd17qKl3I6NXDsf3ykjGZAvldH -Tll+qwEZpXSRa5OWWTpGV8I= ------END PRIVATE KEY-----` diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index b482a9c1aca86..d834b0b19de36 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -95,8 +95,6 @@ func (b *backend) pathConfigAccessRead(ctx context.Context, req *logical.Request Data: map[string]interface{}{ "address": conf.Address, "max_token_name_length": 
conf.MaxTokenNameLength, - "ca_cert": conf.CACert, - "client_cert": conf.ClientCert, }, }, nil } diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go index 8aa0219671fe9..721f2d374a8bb 100644 --- a/builtin/logical/pki/backend.go +++ b/builtin/logical/pki/backend.go @@ -3,14 +3,11 @@ package pki import ( "context" "fmt" - "sort" "strings" "sync" "sync/atomic" "time" - atomic2 "go.uber.org/atomic" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/armon/go-metrics" @@ -78,27 +75,19 @@ func Backend(conf *logical.BackendConfig) *backend { "ca/pem", "ca_chain", "ca", - "crl/delta", - "crl/delta/pem", "crl/pem", "crl", "issuer/+/crl/der", "issuer/+/crl/pem", "issuer/+/crl", - "issuer/+/crl/delta/der", - "issuer/+/crl/delta/pem", - "issuer/+/crl/delta", "issuer/+/pem", "issuer/+/der", "issuer/+/json", "issuers/", // LIST operations append a '/' to the requested path - "ocsp", // OCSP POST - "ocsp/*", // OCSP GET }, LocalStorage: []string{ - revokedPath, - deltaWALPath, + "revoked/", legacyCRLPath, "crls/", "certs/", @@ -131,13 +120,9 @@ func Backend(conf *logical.BackendConfig) *backend { pathSign(&b), pathIssue(&b), pathRotateCRL(&b), - pathRotateDeltaCRL(&b), pathRevoke(&b), - pathRevokeWithKey(&b), pathTidy(&b), - pathTidyCancel(&b), pathTidyStatus(&b), - pathConfigAutoTidy(&b), // Issuer APIs pathListIssuers(&b), @@ -155,7 +140,6 @@ func Backend(conf *logical.BackendConfig) *backend { pathCrossSignIntermediate(&b), pathConfigIssuers(&b), pathReplaceRoot(&b), - pathRevokeIssuer(&b), // Key APIs pathListKeys(&b), @@ -172,10 +156,6 @@ func Backend(conf *logical.BackendConfig) *backend { pathFetchValidRaw(&b), pathFetchValid(&b), pathFetchListCerts(&b), - - // OCSP APIs - buildPathOcspGet(&b), - buildPathOcspPost(&b), }, Secrets: []*framework.Secret{ @@ -188,31 +168,15 @@ func Backend(conf *logical.BackendConfig) *backend { PeriodicFunc: b.periodicFunc, } + b.crlLifetime = time.Hour * 72 b.tidyCASGuard = new(uint32) - b.tidyCancelCAS = 
new(uint32) b.tidyStatus = &tidyStatus{state: tidyStatusInactive} b.storage = conf.StorageView b.backendUUID = conf.BackendUUID b.pkiStorageVersion.Store(0) - // b isn't yet initialized with SystemView state; calling b.System() will - // result in a nil pointer dereference. Instead query BackendConfig's - // copy of SystemView. - cannotRebuildCRLs := conf.System.ReplicationState().HasState(consts.ReplicationPerformanceStandby) || - conf.System.ReplicationState().HasState(consts.ReplicationDRSecondary) - b.crlBuilder = newCRLBuilder(!cannotRebuildCRLs) - - // Delay the first tidy until after we've started up. - b.lastTidy = time.Now() - - // Metrics initialization for count of certificates in storage - b.certsCounted = atomic2.NewBool(false) - b.certCount = new(uint32) - b.revokedCertCount = new(uint32) - b.possibleDoubleCountedSerials = make([]string, 0, 250) - b.possibleDoubleCountedRevokedSerials = make([]string, 0, 250) - + b.crlBuilder = &crlBuilder{} return &b } @@ -221,19 +185,12 @@ type backend struct { backendUUID string storage logical.Storage + crlLifetime time.Duration revokeStorageLock sync.RWMutex tidyCASGuard *uint32 - tidyCancelCAS *uint32 tidyStatusLock sync.RWMutex tidyStatus *tidyStatus - lastTidy time.Time - - certCount *uint32 - revokedCertCount *uint32 - certsCounted *atomic2.Bool - possibleDoubleCountedSerials []string - possibleDoubleCountedRevokedSerials []string pkiStorageVersion atomic.Value crlBuilder *crlBuilder @@ -248,21 +205,17 @@ type ( ) const ( - tidyStatusInactive tidyStatusState = iota - tidyStatusStarted = iota - tidyStatusFinished = iota - tidyStatusError = iota - tidyStatusCancelling = iota - tidyStatusCancelled = iota + tidyStatusInactive tidyStatusState = iota + tidyStatusStarted + tidyStatusFinished + tidyStatusError ) type tidyStatus struct { // Parameters used to initiate the operation - safetyBuffer int - tidyCertStore bool - tidyRevokedCerts bool - tidyRevokedAssocs bool - pauseDuration string + safetyBuffer int + 
tidyCertStore bool + tidyRevokedCerts bool // Status state tidyStatusState @@ -272,7 +225,6 @@ type tidyStatus struct { message string certStoreDeletedCount uint revokedCertDeletedCount uint - missingIssuerCertCount uint } const backendHelp = ` @@ -341,26 +293,6 @@ func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation // initialize is used to perform a possible PKI storage migration if needed func (b *backend) initialize(ctx context.Context, _ *logical.InitializationRequest) error { - sc := b.makeStorageContext(ctx, b.storage) - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { - return err - } - - err := b.initializePKIIssuersStorage(ctx) - if err != nil { - return err - } - - // Initialize also needs to populate our certificate and revoked certificate count - err = b.initializeStoredCertificateCounts(ctx) - if err != nil { - return err - } - - return nil -} - -func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { // Grab the lock prior to the updating of the storage lock preventing us flipping // the storage flag midway through the request stream of other requests. b.issuersLock.Lock() @@ -431,296 +363,17 @@ func (b *backend) invalidate(ctx context.Context, key string) { b.crlBuilder.requestRebuildIfActiveNode(b) }() case strings.HasPrefix(key, issuerPrefix): + // If an issuer has changed on the primary, we need to schedule an update of our CRL, + // the primary cluster would have done it already, but the CRL is cluster specific so + // force a rebuild of ours. if !b.useLegacyBundleCaStorage() { - // See note in updateDefaultIssuerId about why this is necessary. - // We do this ahead of CRL rebuilding just so we know that things - // are stale. - b.crlBuilder.invalidateCRLBuildTime() - - // If an issuer has changed on the primary, we need to schedule an update of our CRL, - // the primary cluster would have done it already, but the CRL is cluster specific so - // force a rebuild of ours. 
b.crlBuilder.requestRebuildIfActiveNode(b) } else { b.Logger().Debug("Ignoring invalidation updates for issuer as the PKI migration has yet to complete.") } - case key == "config/crl": - // We may need to reload our OCSP status flag - b.crlBuilder.markConfigDirty() - case key == storageIssuerConfig: - b.crlBuilder.invalidateCRLBuildTime() } } func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) error { - sc := b.makeStorageContext(ctx, request.Storage) - - doCRL := func() error { - // First attempt to reload the CRL configuration. - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { - return err - } - - // As we're (below) modifying the backing storage, we need to ensure - // we're not on a standby/secondary node. - if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) || - b.System().ReplicationState().HasState(consts.ReplicationDRSecondary) { - return nil - } - - // Check if we're set to auto rebuild and a CRL is set to expire. - if err := b.crlBuilder.checkForAutoRebuild(sc); err != nil { - return err - } - - // Then attempt to rebuild the CRLs if required. - if err := b.crlBuilder.rebuildIfForced(ctx, b, request); err != nil { - return err - } - - // If a delta CRL was rebuilt above as part of the complete CRL rebuild, - // this will be a no-op. However, if we do need to rebuild delta CRLs, - // this would cause us to do so. - if err := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false); err != nil { - return err - } - - return nil - } - - doAutoTidy := func() error { - // As we're (below) modifying the backing storage, we need to ensure - // we're not on a standby/secondary node. 
- if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) || - b.System().ReplicationState().HasState(consts.ReplicationDRSecondary) { - return nil - } - - config, err := sc.getAutoTidyConfig() - if err != nil { - return err - } - - if !config.Enabled || config.Interval <= 0*time.Second { - return nil - } - - // Check if we should run another tidy... - now := time.Now() - b.tidyStatusLock.RLock() - nextOp := b.lastTidy.Add(config.Interval) - b.tidyStatusLock.RUnlock() - if now.Before(nextOp) { - return nil - } - - // Ensure a tidy isn't already running... If it is, we'll trigger - // again when the running one finishes. - if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { - return nil - } - - // Prevent ourselves from starting another tidy operation while - // this one is still running. This operation runs in the background - // and has a separate error reporting mechanism. - b.tidyStatusLock.Lock() - b.lastTidy = now - b.tidyStatusLock.Unlock() - - // Because the request from the parent storage will be cleared at - // some point (and potentially reused) -- due to tidy executing in - // a background goroutine -- we need to copy the storage entry off - // of the backend instead. - backendReq := &logical.Request{ - Storage: b.storage, - } - - b.startTidyOperation(backendReq, config) - return nil - } - - crlErr := doCRL() - tidyErr := doAutoTidy() - - if crlErr != nil && tidyErr != nil { - return fmt.Errorf("Error building CRLs:\n - %v\n\nError running auto-tidy:\n - %v\n", crlErr, tidyErr) - } - - if crlErr != nil { - return fmt.Errorf("Error building CRLs:\n - %v\n", crlErr) - } - - if tidyErr != nil { - return fmt.Errorf("Error running auto-tidy:\n - %v\n", tidyErr) - } - - // Check if the CRL was invalidated due to issuer swap and update - // accordingly. - if err := b.crlBuilder.flushCRLBuildTimeInvalidation(sc); err != nil { - return err - } - - // All good! 
- return nil -} - -func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { - b.tidyStatusLock.RLock() - defer b.tidyStatusLock.RUnlock() - // For performance reasons, we can't lock on issuance/storage of certs until a list operation completes, - // but we want to limit possible miscounts / double-counts to over-counting, so we take the tidy lock which - // prevents (most) deletions - in particular we take a read lock (sufficient to block the write lock in - // tidyStatusStart while allowing tidy to still acquire a read lock to report via its endpoint) - - entries, err := b.storage.List(ctx, "certs/") - if err != nil { - return err - } - atomic.AddUint32(b.certCount, uint32(len(entries))) - - revokedEntries, err := b.storage.List(ctx, "revoked/") - if err != nil { - return err - } - atomic.AddUint32(b.revokedCertCount, uint32(len(revokedEntries))) - - b.certsCounted.Store(true) - // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count - // list, and instead have them update the counter directly. We need to do this so that we are looking at a static - // slice of possibly double counted serials. Note that certsCounted is computed before the storage operation, so - // there may be some delay here. - - // Sort the listed-entries first, to accommodate that delay. - sort.Slice(entries, func(i, j int) bool { - return entries[i] < entries[j] - }) - - sort.Slice(revokedEntries, func(i, j int) bool { - return revokedEntries[i] < revokedEntries[j] - }) - - // We assume here that these lists are now complete. 
- sort.Slice(b.possibleDoubleCountedSerials, func(i, j int) bool { - return b.possibleDoubleCountedSerials[i] < b.possibleDoubleCountedSerials[j] - }) - - listEntriesIndex := 0 - possibleDoubleCountIndex := 0 - for { - if listEntriesIndex >= len(entries) { - break - } - if possibleDoubleCountIndex >= len(b.possibleDoubleCountedSerials) { - break - } - if entries[listEntriesIndex] == b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - // This represents a double-counted entry - b.decrementTotalCertificatesCountNoReport() - listEntriesIndex = listEntriesIndex + 1 - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - if entries[listEntriesIndex] < b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - listEntriesIndex = listEntriesIndex + 1 - continue - } - if entries[listEntriesIndex] > b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - } - - sort.Slice(b.possibleDoubleCountedRevokedSerials, func(i, j int) bool { - return b.possibleDoubleCountedRevokedSerials[i] < b.possibleDoubleCountedRevokedSerials[j] - }) - - listRevokedEntriesIndex := 0 - possibleRevokedDoubleCountIndex := 0 - for { - if listRevokedEntriesIndex >= len(revokedEntries) { - break - } - if possibleRevokedDoubleCountIndex >= len(b.possibleDoubleCountedRevokedSerials) { - break - } - if revokedEntries[listRevokedEntriesIndex] == b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - // This represents a double-counted revoked entry - b.decrementTotalRevokedCertificatesCountNoReport() - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] < b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] > 
b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - } - - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - - certCount := atomic.LoadUint32(b.certCount) - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) - revokedCertCount := atomic.LoadUint32(b.revokedCertCount) - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) - - return nil -} - -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. certsCounted := b.certsCounted.Load() -func (b *backend) incrementTotalCertificatesCount(certsCounted bool, newSerial string) { - certCount := atomic.AddUint32(b.certCount, 1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "certs/") { - newSerial = newSerial[6:] - } - b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) - default: - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) - } -} - -func (b *backend) decrementTotalCertificatesCountReport() { - certCount := b.decrementTotalCertificatesCountNoReport() - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) -} - -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -func (b *backend) decrementTotalCertificatesCountNoReport() uint32 { - newCount := atomic.AddUint32(b.certCount, ^uint32(0)) - return newCount -} - -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. 
certsCounted := b.certsCounted.Load() -func (b *backend) incrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { - newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, 1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial - newSerial = newSerial[8:] - } - b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) - default: - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(newRevokedCertCount)) - } -} - -func (b *backend) decrementTotalRevokedCertificatesCountReport() { - revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) -} - -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -func (b *backend) decrementTotalRevokedCertificatesCountNoReport() uint32 { - newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, ^uint32(0)) - return newRevokedCertCount + return b.crlBuilder.rebuildIfForced(ctx, b, request) } diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 190c336b87678..5d3373855f440 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -12,7 +12,6 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/base64" - "encoding/hex" "encoding/json" "encoding/pem" "fmt" @@ -27,7 +26,6 @@ import ( "strconv" "strings" "sync" - "sync/atomic" "testing" "time" @@ -103,7 +101,6 @@ OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= ) func TestPKI_RequireCN(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -129,7 +126,7 @@ func 
TestPKI_RequireCN(t *testing.T) { // Issue a cert with require_cn set to true and with common name supplied. // It should succeed. - _, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ "common_name": "foobar.com", }) if err != nil { @@ -138,7 +135,7 @@ func TestPKI_RequireCN(t *testing.T) { // Issue a cert with require_cn set to true and with out supplying the // common name. It should error out. - _, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) if err == nil { t.Fatalf("expected an error due to missing common_name") } @@ -179,7 +176,6 @@ func TestPKI_RequireCN(t *testing.T) { } func TestPKI_DeviceCert(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -248,7 +244,6 @@ func TestPKI_DeviceCert(t *testing.T) { } func TestBackend_InvalidParameter(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -270,7 +265,6 @@ func TestBackend_InvalidParameter(t *testing.T) { } func TestBackend_CSRValues(t *testing.T) { - t.Parallel() initTest.Do(setCerts) b, _ := createBackendWithStorage(t) @@ -287,7 +281,6 @@ func TestBackend_CSRValues(t *testing.T) { } func TestBackend_URLsCRUD(t *testing.T) { - t.Parallel() initTest.Do(setCerts) b, _ := createBackendWithStorage(t) @@ -306,7 +299,6 @@ func TestBackend_URLsCRUD(t *testing.T) { // Generates and tests steps that walk through the various possibilities // of role flags to ensure that they are properly restricted func TestBackend_Roles(t *testing.T) { - t.Parallel() cases := []struct { name string key, cert *string @@ -582,11 +574,8 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s Data: map[string]interface{}{ "common_name": 
"intermediate.cert.com", "csr": csrPem2048, - "signature_bits": 512, "format": "der", "not_before_duration": "2h", - // Let's Encrypt -- R3 SKID - "skid": "14:2E:B3:17:B7:58:56:CB:AE:50:09:40:E6:1F:AF:9D:8B:14:C2:C6", }, Check: func(resp *logical.Response) error { certString := resp.Data["certificate"].(string) @@ -606,21 +595,15 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s } cert := certs[0] - skid, _ := hex.DecodeString("142EB317B75856CBAE500940E61FAF9D8B14C2C6") - switch { case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): - return fmt.Errorf("IssuingCertificateURL:\nexpected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): - return fmt.Errorf("CRLDistributionPoints:\nexpected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): - return fmt.Errorf("OCSPServer:\nexpected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) case !reflect.DeepEqual([]string{"intermediate.cert.com"}, cert.DNSNames): - return fmt.Errorf("DNSNames\nexpected\n%#v\ngot\n%#v\n", []string{"intermediate.cert.com"}, cert.DNSNames) - case !reflect.DeepEqual(x509.SHA512WithRSA, cert.SignatureAlgorithm): - return fmt.Errorf("Signature Algorithm:\nexpected\n%#v\ngot\n%#v\n", x509.SHA512WithRSA, cert.SignatureAlgorithm) - case !reflect.DeepEqual(skid, cert.SubjectKeyId): - return fmt.Errorf("SKID:\nexpected\n%#v\ngot\n%#v\n", skid, cert.SubjectKeyId) + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", 
[]string{"intermediate.cert.com"}, cert.DNSNames) } if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { @@ -1080,7 +1063,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } cert := parsedCertBundle.Certificate - actualDiff := time.Since(cert.NotBefore) + actualDiff := time.Now().Sub(cert.NotBefore) certRoleDiff := (role.NotBeforeDuration - actualDiff).Truncate(time.Second) // These times get truncated, so give a 1 second buffer on each side if certRoleDiff >= -1*time.Second && certRoleDiff <= 1*time.Second { @@ -1513,8 +1496,8 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { return fmt.Errorf("error parsing cert bundle: %s", err) } cert := parsedCertBundle.Certificate - var expected []net.IP - expected = append(expected, expectedIp...) + var emptyIPs []net.IP + var expected []net.IP = append(emptyIPs, expectedIp...) if diff := deep.Equal(cert.IPAddresses, expected); len(diff) > 0 { return fmt.Errorf("wrong SAN IPs, diff: %v", diff) } @@ -1590,8 +1573,8 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { if err != nil { return err } - var expected []otherNameUtf8 - expected = append(expected, expectedOthers...) + var emptyOthers []otherNameUtf8 + var expected []otherNameUtf8 = append(emptyOthers, expectedOthers...) if diff := deep.Equal(foundOthers, expected); len(diff) > 0 { return fmt.Errorf("wrong SAN IPs, diff: %v", diff) } @@ -1729,7 +1712,6 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } func TestRolesAltIssuer(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) // Create two issuers. 
@@ -1827,7 +1809,6 @@ func TestRolesAltIssuer(t *testing.T) { } func TestBackend_PathFetchValidRaw(t *testing.T) { - t.Parallel() b, storage := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -1875,11 +1856,11 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { t.Fatalf("failed read ca/pem, %#v", resp) } // check the raw cert matches the response body - if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), []byte(rootCaAsPem)) { + if bytes.Compare(resp.Data[logical.HTTPRawBody].([]byte), []byte(rootCaAsPem)) != 0 { t.Fatalf("failed to get raw cert") } - _, err = b.HandleRequest(context.Background(), &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Path: "roles/example", Storage: storage, @@ -1909,7 +1890,7 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { issueCrtAsPem := resp.Data["certificate"].(string) issuedCrt := parseCert(t, issueCrtAsPem) - expectedSerial := serialFromCert(issuedCrt) + expectedSerial := certutil.GetHexFormatted(issuedCrt.SerialNumber.Bytes(), ":") expectedCert := []byte(issueCrtAsPem) // get der cert @@ -1928,7 +1909,7 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { // check the raw cert matches the response body rawBody := resp.Data[logical.HTTPRawBody].([]byte) bodyAsPem := []byte(strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawBody})))) - if !bytes.Equal(bodyAsPem, expectedCert) { + if bytes.Compare(bodyAsPem, expectedCert) != 0 { t.Fatalf("failed to get raw cert for serial number: %s", expectedSerial) } if resp.Data[logical.HTTPContentType] != "application/pkix-cert" { @@ -1949,7 +1930,7 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { } // check the pem cert matches the response body - if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), expectedCert) { + if bytes.Compare(resp.Data[logical.HTTPRawBody].([]byte), expectedCert) != 0 { t.Fatalf("failed 
to get pem cert") } if resp.Data[logical.HTTPContentType] != "application/pem-certificate-chain" { @@ -1958,7 +1939,6 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { } func TestBackend_PathFetchCertList(t *testing.T) { - t.Parallel() // create the backend b, storage := createBackendWithStorage(t) @@ -2084,7 +2064,6 @@ func TestBackend_PathFetchCertList(t *testing.T) { } func TestBackend_SignVerbatim(t *testing.T) { - t.Parallel() testCases := []struct { testName string keyType string @@ -2095,7 +2074,6 @@ func TestBackend_SignVerbatim(t *testing.T) { {testName: "Any", keyType: "any"}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { runTestSignVerbatim(t, tc.keyType) }) @@ -2151,17 +2129,13 @@ func runTestSignVerbatim(t *testing.T, keyType string) { t.Fatal("pem csr is empty") } - signVerbatimData := map[string]interface{}{ - "csr": pemCSR, - } - if keyType == "rsa" { - signVerbatimData["signature_bits"] = 512 - } resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim", - Storage: storage, - Data: signVerbatimData, + Operation: logical.UpdateOperation, + Path: "sign-verbatim", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + }, MountPoint: "pki/", }) if resp != nil && resp.IsError() { @@ -2335,7 +2309,6 @@ func runTestSignVerbatim(t *testing.T, keyType string) { } func TestBackend_Root_Idempotency(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) // This is a change within 1.11, we are no longer idempotent across generate/internal calls. 
@@ -2440,7 +2413,6 @@ func TestBackend_Root_Idempotency(t *testing.T) { } func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { - t.Parallel() b_root, s_root := createBackendWithStorage(t) b_int, s_int := createBackendWithStorage(t) var err error @@ -2508,7 +2480,6 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { } func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { - t.Parallel() // create the backend b, s := createBackendWithStorage(t) @@ -2543,7 +2514,6 @@ func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { } func TestBackend_SignSelfIssued(t *testing.T) { - t.Parallel() // create the backend b, storage := createBackendWithStorage(t) @@ -2632,7 +2602,7 @@ func TestBackend_SignSelfIssued(t *testing.T) { t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject) } - ss, _ = getSelfSigned(t, template, template, key) + ss, ssCert = getSelfSigned(t, template, template, key) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Path: "root/sign-self-issued", @@ -2659,8 +2629,7 @@ func TestBackend_SignSelfIssued(t *testing.T) { t.Fatal(err) } - sc := b.makeStorageContext(context.Background(), storage) - signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + signingBundle, err := fetchCAInfo(context.Background(), b, &logical.Request{Storage: storage}, defaultRef, ReadOnlyUsage) if err != nil { t.Fatal(err) } @@ -2684,7 +2653,6 @@ func TestBackend_SignSelfIssued(t *testing.T) { // TestBackend_SignSelfIssued_DifferentTypes tests the functionality of the // require_matching_certificate_algorithms flag. 
func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { - t.Parallel() // create the backend b, storage := createBackendWithStorage(t) @@ -2766,7 +2734,7 @@ func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { // Test with flag present and true ss, _ = getSelfSigned(t, template, template, key) - _, err = b.HandleRequest(context.Background(), &logical.Request{ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Path: "root/sign-self-issued", Storage: storage, @@ -2810,7 +2778,6 @@ func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { // here into the form at that site as it will do the right thing so it's pretty // easy to validate. func TestBackend_OID_SANs(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) var err error @@ -2867,7 +2834,7 @@ func TestBackend_OID_SANs(t *testing.T) { } // First test some bad stuff that shouldn't work - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", "alt_names": "foo.foobar.com,bar.foobar.com", @@ -2879,7 +2846,7 @@ func TestBackend_OID_SANs(t *testing.T) { t.Fatal("expected error") } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", "alt_names": "foo.foobar.com,bar.foobar.com", @@ -2891,7 +2858,7 @@ func TestBackend_OID_SANs(t *testing.T) { t.Fatal("expected error") } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", "alt_names": "foo.foobar.com,bar.foobar.com", @@ -2903,7 +2870,7 @@ func TestBackend_OID_SANs(t *testing.T) { t.Fatal("expected error") } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, 
"issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", "alt_names": "foo.foobar.com,bar.foobar.com", @@ -2915,7 +2882,7 @@ func TestBackend_OID_SANs(t *testing.T) { t.Fatal("expected error") } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", "alt_names": "foo.foobar.com,bar.foobar.com", @@ -3033,7 +3000,6 @@ func TestBackend_OID_SANs(t *testing.T) { } func TestBackend_AllowedSerialNumbers(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) var err error @@ -3059,7 +3025,7 @@ func TestBackend_AllowedSerialNumbers(t *testing.T) { t.Fatal(err) } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar", "ttl": "1h", }) @@ -3067,7 +3033,7 @@ func TestBackend_AllowedSerialNumbers(t *testing.T) { t.Fatal(err) } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar", "ttl": "1h", "serial_number": "foobar", @@ -3086,7 +3052,7 @@ func TestBackend_AllowedSerialNumbers(t *testing.T) { t.Fatal(err) } - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ "common_name": "foobar", "ttl": "1h", // Not a valid serial number @@ -3140,7 +3106,6 @@ func TestBackend_AllowedSerialNumbers(t *testing.T) { } func TestBackend_URI_SANs(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) var err error @@ -3234,7 +3199,6 @@ func TestBackend_URI_SANs(t *testing.T) { } func TestBackend_AllowedURISANsTemplate(t *testing.T) { - t.Parallel() coreConfig := &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ "userpass": userpass.Factory, @@ -3359,7 +3323,6 @@ func TestBackend_AllowedURISANsTemplate(t *testing.T) { } func 
TestBackend_AllowedDomainsTemplate(t *testing.T) { - t.Parallel() coreConfig := &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ "userpass": userpass.Factory, @@ -3457,7 +3420,7 @@ func TestBackend_AllowedDomainsTemplate(t *testing.T) { t.Fatal(err) } - // Issue certificate for foobar.com to verify allowed_domain_template doesn't break plain domains. + // Issue certificate for foobar.com to verify allowed_domain_templae doesnt break plain domains. _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foobar.com"}) if err != nil { t.Fatal(err) @@ -3491,7 +3454,6 @@ func TestBackend_AllowedDomainsTemplate(t *testing.T) { } func TestReadWriteDeleteRoles(t *testing.T) { - t.Parallel() ctx := context.Background() coreConfig := &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ @@ -3553,7 +3515,6 @@ func TestReadWriteDeleteRoles(t *testing.T) { "allowed_serial_numbers": []interface{}{}, "generate_lease": false, "signature_bits": json.Number("256"), - "use_pss": false, "allowed_domains": []interface{}{}, "allowed_uri_sans_template": false, "enforce_hostnames": true, @@ -3592,7 +3553,6 @@ func TestReadWriteDeleteRoles(t *testing.T) { "street_address": []interface{}{}, "code_signing_flag": false, "issuer_ref": "default", - "cn_validations": []interface{}{"email", "hostname"}, } if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { @@ -3670,7 +3630,7 @@ func setCerts() { if err != nil { panic(err) } - _, err = certutil.GetSubjKeyID(rak) + subjKeyID, err = certutil.GetSubjKeyID(rak) if err != nil { panic(err) } @@ -3700,7 +3660,7 @@ func setCerts() { if err != nil { panic(err) } - _, err = certutil.GetSubjKeyID(edk) + subjKeyID, err = certutil.GetSubjKeyID(edk) if err != nil { panic(err) } @@ -3720,8 +3680,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { // that we have to deal with more than one interval. 
// InMemSink rounds down to an interval boundary rather than // starting one at the time of initialization. - // - // This test is not parallelizable. inmemSink := metrics.NewInmemSink( 1000000*time.Hour, 2000000*time.Hour) @@ -3732,10 +3690,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { metricsConf.EnableServiceLabel = false metricsConf.EnableTypePrefix = false - _, err := metrics.NewGlobal(metricsConf, inmemSink) - if err != nil { - t.Fatal(err) - } + metrics.NewGlobal(metricsConf, inmemSink) // Enable PKI secret engine coreConfig := &vault.CoreConfig{ @@ -3752,6 +3707,8 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { vault.TestWaitActive(t, cores[0].Core) client := cores[0].Client + var err error + // Mount /pki as a root CA err = client.Sys().Mount("pki", &api.MountInput{ Type: "pki", @@ -3764,22 +3721,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal(err) } - // Check the metrics initialized in order to calculate backendUUID for /pki - // BackendUUID not consistent during tests with UUID from /sys/mounts/pki - metricsSuffix := "total_certificates_stored" - backendUUID := "" - mostRecentInterval := inmemSink.Data()[len(inmemSink.Data())-1] - for _, existingGauge := range mostRecentInterval.Gauges { - if strings.HasSuffix(existingGauge.Name, metricsSuffix) { - expandedGaugeName := existingGauge.Name - backendUUID = strings.Split(expandedGaugeName, ".")[2] - break - } - } - if backendUUID == "" { - t.Fatalf("No Gauge Found ending with %s", metricsSuffix) - } - // Set the cluster's certificate as the root CA in /pki pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ @@ -3837,21 +3778,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal(err) } - // Check the cert-count metrics - expectedCertCountGaugeMetrics := map[string]float32{ - "secrets.pki." 
+ backendUUID + ".total_revoked_certificates_stored": 1, - "secrets.pki." + backendUUID + ".total_certificates_stored": 1, - } - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] - for gauge, value := range expectedCertCountGaugeMetrics { - if _, ok := mostRecentInterval.Gauges[gauge]; !ok { - t.Fatalf("Expected metrics to include a value for gauge %s", gauge) - } - if value != mostRecentInterval.Gauges[gauge].Value { - t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, mostRecentInterval.Gauges[gauge].Value) - } - } - // Revoke adds a fixed 2s buffer, so we sleep for a bit longer to ensure // the revocation time is past the current time. time.Sleep(3 * time.Second) @@ -3906,21 +3832,16 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal(err) } expectedData := map[string]interface{}{ - "safety_buffer": json.Number("1"), - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "tidy_revoked_cert_issuer_associations": false, - "pause_duration": "0s", - "state": "Finished", - "error": nil, - "time_started": nil, - "time_finished": nil, - "message": nil, - "cert_store_deleted_count": json.Number("1"), - "revoked_cert_deleted_count": json.Number("1"), - "missing_issuer_cert_count": json.Number("0"), - "current_cert_store_count": json.Number("0"), - "current_revoked_cert_count": json.Number("0"), + "safety_buffer": json.Number("1"), + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "state": "Finished", + "error": nil, + "time_started": nil, + "time_finished": nil, + "message": nil, + "cert_store_deleted_count": json.Number("1"), + "revoked_cert_deleted_count": json.Number("1"), } // Let's copy the times from the response so that we can use deep.Equal() timeStarted, ok := tidyStatus.Data["time_started"] @@ -3940,17 +3861,13 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { } // Check the tidy metrics { - // Map of gauges to expected value + // Map of gagues to expected value expectedGauges := 
map[string]float32{ - "secrets.pki.tidy.cert_store_current_entry": 0, - "secrets.pki.tidy.cert_store_total_entries": 1, - "secrets.pki.tidy.revoked_cert_current_entry": 0, - "secrets.pki.tidy.revoked_cert_total_entries": 1, - "secrets.pki.tidy.start_time_epoch": 0, - "secrets.pki." + backendUUID + ".total_certificates_stored": 0, - "secrets.pki." + backendUUID + ".total_revoked_certificates_stored": 0, - "secrets.pki.tidy.cert_store_total_entries_remaining": 0, - "secrets.pki.tidy.revoked_cert_total_entries_remaining": 0, + "secrets.pki.tidy.cert_store_current_entry": 0, + "secrets.pki.tidy.cert_store_total_entries": 1, + "secrets.pki.tidy.revoked_cert_current_entry": 0, + "secrets.pki.tidy.revoked_cert_total_entries": 1, + "secrets.pki.tidy.start_time_epoch": 0, } // Map of counters to the sum of the metrics for that counter expectedCounters := map[string]float64{ @@ -3960,7 +3877,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { // Note that "secrets.pki.tidy.failure" won't be in the captured metrics } - // If the metrics span more than one interval, skip the checks + // If the metrics span mnore than one interval, skip the checks intervals := inmemSink.Data() if len(intervals) == 1 { interval := inmemSink.Data()[0] @@ -4002,7 +3919,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { } func TestBackend_Root_FullCAChain(t *testing.T) { - t.Parallel() testCases := []struct { testName string keyType string @@ -4012,7 +3928,6 @@ func TestBackend_Root_FullCAChain(t *testing.T) { {testName: "EC", keyType: "ec"}, } for _, tc := range testCases { - tc := tc t.Run(tc.testName, func(t *testing.T) { runFullCAChainTest(t, tc.keyType) }) @@ -4051,7 +3966,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { requireCertInCaChainString(t, fullChain, rootCert, "expected root cert within root cert/ca_chain") // Make sure when we issue a leaf certificate we get the full chain back. 
- _, err = CBWrite(b_root, s_root, "roles/example", map[string]interface{}{ + resp, err = CBWrite(b_root, s_root, "roles/example", map[string]interface{}{ "allowed_domains": "example.com", "allow_subdomains": "true", "max_ttl": "1h", @@ -4084,7 +3999,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ "csr": intermediateData["csr"], - "format": "pem", + "format": "pem_bundle", }) if err != nil { t.Fatal(err) @@ -4103,7 +4018,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { require.Equal(t, parseCert(t, intermediateCaChain[0]), intermediaryCaCert, "intermediate signed cert should have been part of ca_chain") require.Equal(t, parseCert(t, intermediateCaChain[1]), rootCaCert, "root cert should have been part of ca_chain") - _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ + resp, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ "certificate": intermediateCert + "\n" + rootCert + "\n", }) if err != nil { @@ -4129,7 +4044,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { requireCertInCaChainString(t, fullChain, rootCert, "expected full chain to contain root certificate from pki-intermediate/cert/ca_chain") // Make sure when we issue a leaf certificate we get the full chain back. - _, err = CBWrite(b_int, s_int, "roles/example", map[string]interface{}{ + resp, err = CBWrite(b_int, s_int, "roles/example", map[string]interface{}{ "allowed_domains": "example.com", "allow_subdomains": "true", "max_ttl": "1h", @@ -4149,7 +4064,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { // "external" CAs behave as expected. 
b_ext, s_ext := createBackendWithStorage(t) - _, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ + resp, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ "pem_bundle": intermediateKey + "\n" + intermediateCert + "\n" + rootCert + "\n", }) if err != nil { @@ -4174,7 +4089,7 @@ func runFullCAChainTest(t *testing.T, keyType string) { } // Now issue a short-lived certificate from our pki-external. - _, err = CBWrite(b_ext, s_ext, "roles/example", map[string]interface{}{ + resp, err = CBWrite(b_ext, s_ext, "roles/example", map[string]interface{}{ "allowed_domains": "example.com", "allow_subdomains": "true", "max_ttl": "1h", @@ -4192,21 +4107,6 @@ func runFullCAChainTest(t *testing.T, keyType string) { // Verify that the certificates are signed by the intermediary CA key... requireSignedBy(t, issuedCrt, intermediaryCaCert.PublicKey) - - // Test that we can request that the root ca certificate not appear in the ca_chain field - resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - "remove_roots_from_chain": "true", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing certificate when removing self signed") - fullChain = strings.Join(resp.Data["ca_chain"].([]string), "\n") - if strings.Count(fullChain, intermediateCert) != 1 { - t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) - } - if strings.Count(fullChain, rootCert) != 0 { - t.Fatalf("expected full chain to NOT contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) - } } func requireCertInCaChainArray(t *testing.T, chain []string, cert string, msgAndArgs ...interface{}) { @@ -4257,7 +4157,6 @@ type IssuanceRegression struct { AllowSubdomains MultiBool AllowLocalhost MultiBool AllowWildcardCertificates MultiBool - CNValidations []string CommonName string Issued bool } @@ -4270,30 +4169,25 @@ 
func RoleIssuanceRegressionHelper(t *testing.T, b *backend, s logical.Storage, i for _, AllowLocalhost := range test.AllowLocalhost.ToValues() { for _, AllowWildcardCertificates := range test.AllowWildcardCertificates.ToValues() { role := fmt.Sprintf("issuance-regression-%d-bare-%v-glob-%v-subdomains-%v-localhost-%v-wildcard-%v", index, AllowBareDomains, AllowGlobDomains, AllowSubdomains, AllowLocalhost, AllowWildcardCertificates) - _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ + resp, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ "allowed_domains": test.AllowedDomains, "allow_bare_domains": AllowBareDomains, "allow_glob_domains": AllowGlobDomains, "allow_subdomains": AllowSubdomains, "allow_localhost": AllowLocalhost, "allow_wildcard_certificates": AllowWildcardCertificates, - "cn_validations": test.CNValidations, // TODO: test across this vector as well. Currently certain wildcard // matching is broken with it enabled (such as x*x.foo). "enforce_hostnames": false, "key_type": "ec", "key_bits": 256, - "no_store": true, - // With the CN Validations field, ensure we prevent CN from appearing - // in SANs. }) if err != nil { t.Fatal(err) } - resp, err := CBWrite(b, s, "issue/"+role, map[string]interface{}{ - "common_name": test.CommonName, - "exclude_cn_from_sans": true, + resp, err = CBWrite(b, s, "issue/"+role, map[string]interface{}{ + "common_name": test.CommonName, }) haveErr := err != nil || resp == nil @@ -4314,7 +4208,6 @@ func RoleIssuanceRegressionHelper(t *testing.T, b *backend, s logical.Storage, i } func TestBackend_Roles_IssuanceRegression(t *testing.T) { - t.Parallel() // Regression testing of role's issuance policy. testCases := []IssuanceRegression{ // allowed, bare, glob, subdomains, localhost, wildcards, cn, issued @@ -4323,166 +4216,149 @@ func TestBackend_Roles_IssuanceRegression(t *testing.T) { // Allowed contains globs, but globbing not allowed, resulting in all // issuances failing. 
Note that tests against issuing a wildcard with // a bare domain will be covered later. - /* 0 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 1 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 2 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, - /* 3 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, - /* 4 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, - /* 5 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, - /* 6 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, - /* 7 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 8 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 9 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, - /* 10 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, - /* 11 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, - /* 12 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, + /* 0 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "baz.fud.bar.foo", false}, + /* 1 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "*.fud.bar.foo", false}, + /* 2 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "fud.bar.foo", false}, + /* 3 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "*.bar.foo", false}, + /* 4 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "bar.foo", false}, + /* 5 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, "*.foo", false}, + /* 6 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "foo", false}, + /* 7 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "baz.fud.bar.foo", false}, + /* 8 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, 
"*.fud.bar.foo", false}, + /* 9 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "fud.bar.foo", false}, + /* 10 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "*.bar.foo", false}, + /* 11 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "bar.foo", false}, + /* 12 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, "foo", false}, // === Localhost sanity === // // Localhost forbidden, not matching allowed domains -> not issued - /* 13 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + /* 13 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MFalse, MAny, "localhost", false}, // Localhost allowed, not matching allowed domains -> issued - /* 14 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + /* 14 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MTrue, MAny, "localhost", true}, // Localhost allowed via allowed domains (and bare allowed), not by AllowLocalhost -> issued - /* 15 */ {[]string{"localhost"}, MTrue, MAny, MAny, MFalse, MAny, nil, "localhost", true}, + /* 15 */ {[]string{"localhost"}, MTrue, MAny, MAny, MFalse, MAny, "localhost", true}, // Localhost allowed via allowed domains (and bare not allowed), not by AllowLocalhost -> not issued - /* 16 */ {[]string{"localhost"}, MFalse, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + /* 16 */ {[]string{"localhost"}, MFalse, MAny, MAny, MFalse, MAny, "localhost", false}, // Localhost allowed via allowed domains (but bare not allowed), and by AllowLocalhost -> issued - /* 17 */ {[]string{"localhost"}, MFalse, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + /* 17 */ {[]string{"localhost"}, MFalse, MAny, MAny, MTrue, MAny, "localhost", true}, // === Bare wildcard issuance == // // allowed_domains contains one or more wildcards and bare domains allowed, // resulting in the cert being issued. 
- /* 18 */ {[]string{"*.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "*.foo", true}, - /* 19 */ {[]string{"*.*.foo"}, MTrue, MAny, MAny, MAny, MAny, nil, "*.*.foo", false}, // Does not conform to RFC 6125 + /* 18 */ {[]string{"*.foo"}, MTrue, MAny, MAny, MAny, MTrue, "*.foo", true}, + /* 19 */ {[]string{"*.*.foo"}, MTrue, MAny, MAny, MAny, MAny, "*.*.foo", false}, // Does not conform to RFC 6125 // === Double Leading Glob Testing === // // Allowed contains globs, but glob allowed so certain matches work. // The value of bare and localhost does not impact these results. - /* 20 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 21 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains - /* 22 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, - /* 23 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 - /* 24 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", false}, - /* 25 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "*.foo", false}, - /* 26 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + /* 20 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 21 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 22 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "fud.bar.foo", true}, + /* 23 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 24 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "bar.foo", false}, + /* 25 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "*.foo", false}, + /* 26 */ 
{[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "foo", false}, // Allowed contains globs, but glob and subdomain both work, so we expect // wildcard issuance to work as well. The value of bare and localhost does // not impact these results. - /* 27 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 28 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 29 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 30 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 - /* 31 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", false}, - /* 32 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "*.foo", false}, - /* 33 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + /* 27 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "baz.fud.bar.foo", true}, + /* 28 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, "*.fud.bar.foo", true}, + /* 29 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "fud.bar.foo", true}, + /* 30 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 31 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "bar.foo", false}, + /* 32 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "*.foo", false}, + /* 33 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "foo", false}, // === Single Leading Glob Testing === // // Allowed contains globs, but glob allowed so certain matches work. // The value of bare and localhost does not impact these results. 
- /* 34 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 35 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains - /* 36 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 37 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // glob domain allows wildcards of subdomains - /* 38 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", true}, - /* 39 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + /* 34 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 35 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 36 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 37 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, "*.bar.foo", true}, // glob domain allows wildcards of subdomains + /* 38 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "bar.foo", true}, + /* 39 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, "foo", false}, // Allowed contains globs, but glob and subdomain both work, so we expect // wildcard issuance to work as well. The value of bare and localhost does // not impact these results. 
- /* 40 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 41 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 42 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 43 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, - /* 44 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", true}, - /* 45 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + /* 40 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "baz.fud.bar.foo", true}, + /* 41 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, "*.fud.bar.foo", true}, + /* 42 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "fud.bar.foo", true}, + /* 43 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, "*.bar.foo", true}, + /* 44 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "bar.foo", true}, + /* 45 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, "foo", false}, // === Only base domain name === // // Allowed contains only domain components, but subdomains not allowed. This // results in most issuances failing unless we allow bare domains, in which // case only the final issuance for "foo" will succeed. 
- /* 46 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 47 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 48 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "fud.bar.foo", false}, - /* 49 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.bar.foo", false}, - /* 50 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "bar.foo", false}, - /* 51 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.foo", false}, - /* 52 */ {[]string{"foo"}, MFalse, MAny, MFalse, MAny, MAny, nil, "foo", false}, - /* 53 */ {[]string{"foo"}, MTrue, MAny, MFalse, MAny, MAny, nil, "foo", true}, + /* 46 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "baz.fud.bar.foo", false}, + /* 47 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "*.fud.bar.foo", false}, + /* 48 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "fud.bar.foo", false}, + /* 49 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "*.bar.foo", false}, + /* 50 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "bar.foo", false}, + /* 51 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, "*.foo", false}, + /* 52 */ {[]string{"foo"}, MFalse, MAny, MFalse, MAny, MAny, "foo", false}, + /* 53 */ {[]string{"foo"}, MTrue, MAny, MFalse, MAny, MAny, "foo", true}, // Allowed contains only domain components, and subdomains are now allowed. // This results in most issuances succeeding, with the exception of the // base foo, which is still governed by base's value. 
- /* 54 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 55 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 56 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 57 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, - /* 58 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "bar.foo", true}, - /* 59 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.foo", true}, - /* 60 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*x.foo", true}, // internal wildcards should be allowed per RFC 6125/6.4.3 - /* 61 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*x.foo", true}, // prefix wildcards should be allowed per RFC 6125/6.4.3 - /* 62 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*.foo", true}, // suffix wildcards should be allowed per RFC 6125/6.4.3 - /* 63 */ {[]string{"foo"}, MFalse, MAny, MTrue, MAny, MAny, nil, "foo", false}, - /* 64 */ {[]string{"foo"}, MTrue, MAny, MTrue, MAny, MAny, nil, "foo", true}, + /* 54 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, "baz.fud.bar.foo", true}, + /* 55 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "*.fud.bar.foo", true}, + /* 56 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, "fud.bar.foo", true}, + /* 57 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "*.bar.foo", true}, + /* 58 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, "bar.foo", true}, + /* 59 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "*.foo", true}, + /* 60 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "x*x.foo", true}, // internal wildcards should be allowed per RFC 6125/6.4.3 + /* 61 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "*x.foo", true}, // prefix wildcards should be allowed per RFC 6125/6.4.3 + /* 62 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, "x*.foo", true}, // suffix wildcards should be allowed per RFC 
6125/6.4.3 + /* 63 */ {[]string{"foo"}, MFalse, MAny, MTrue, MAny, MAny, "foo", false}, + /* 64 */ {[]string{"foo"}, MTrue, MAny, MTrue, MAny, MAny, "foo", true}, // === Internal Glob Matching === // // Basic glob matching requirements - /* 65 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", true}, - /* 66 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", true}, // globs can match across subdomains - /* 67 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched - /* 68 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x*x isn't matched. - /* 69 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard - /* 70 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x*x.foo", false}, // Does not conform to RFC 6125 - /* 71 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.xyx.foo", false}, // Globs and Subdomains do not layer per docs. + /* 65 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xerox.foo", true}, + /* 66 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xylophone.files.pyrex.foo", true}, // globs can match across subdomains + /* 67 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xercex.bar.foo", false}, // x.foo isn't matched + /* 68 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "bar.foo", false}, // x*x isn't matched. + /* 69 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.foo", false}, // unrelated wildcard + /* 70 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.x*x.foo", false}, // Does not conform to RFC 6125 + /* 71 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.xyx.foo", false}, // Globs and Subdomains do not layer per docs. // Various requirements around x*x.foo wildcard matching. 
- /* 72 */ {[]string{"x*x.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "x*x.foo", false}, // base disabled, shouldn't match wildcard - /* 73 */ {[]string{"x*x.foo"}, MFalse, MTrue, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base disallowed, but globbing allowed and should match - /* 74 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base allowed, should match wildcard + /* 72 */ {[]string{"x*x.foo"}, MFalse, MFalse, MAny, MAny, MAny, "x*x.foo", false}, // base disabled, shouldn't match wildcard + /* 73 */ {[]string{"x*x.foo"}, MFalse, MTrue, MAny, MAny, MTrue, "x*x.foo", true}, // base disallowed, but globbing allowed and should match + /* 74 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MTrue, "x*x.foo", true}, // base allowed, should match wildcard // Basic glob matching requirements with internal dots. - /* 75 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", false}, // missing dots - /* 76 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ero.x.foo", true}, - /* 77 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", false}, // missing dots - /* 78 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ylophone.files.pyre.x.foo", true}, // globs can match across subdomains - /* 79 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched - /* 80 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x.*.x isn't matched. - /* 81 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard - /* 82 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.*.x.foo", false}, // Does not conform to RFC 6125 - /* 83 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.y.x.foo", false}, // Globs and Subdomains do not layer per docs. 
+ /* 75 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xerox.foo", false}, // missing dots + /* 76 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "x.ero.x.foo", true}, + /* 77 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xylophone.files.pyrex.foo", false}, // missing dots + /* 78 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "x.ylophone.files.pyre.x.foo", true}, // globs can match across subdomains + /* 79 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "xercex.bar.foo", false}, // x.foo isn't matched + /* 80 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "bar.foo", false}, // x.*.x isn't matched. + /* 81 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.foo", false}, // unrelated wildcard + /* 82 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.x.*.x.foo", false}, // Does not conform to RFC 6125 + /* 83 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, "*.x.y.x.foo", false}, // Globs and Subdomains do not layer per docs. 
// === Wildcard restriction testing === // - /* 84 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.fud.bar.foo", false}, // glob domain allows wildcard of subdomains - /* 85 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.bar.foo", false}, // glob domain allows wildcards of subdomains - /* 86 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.fud.bar.foo", false}, - /* 87 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.bar.foo", false}, - /* 88 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.foo", false}, - /* 89 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*x.foo", false}, - /* 90 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*x.foo", false}, - /* 91 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*.foo", false}, - /* 92 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MFalse, nil, "x*x.foo", false}, - /* 93 */ {[]string{"*.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, // Bare and globs forbidden despite (potentially) allowing wildcards. 
- /* 94 */ {[]string{"x.*.x.foo"}, MAny, MAny, MAny, MAny, MAny, nil, "x.*.x.foo", false}, // Does not conform to RFC 6125 - - // === CN validation allowances === // - /* 95 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.bar.foo", true}, - /* 96 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.*.foo", true}, - /* 97 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.bar.*.bar", true}, - /* 98 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo", true}, - /* 99 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo@foo", true}, - /* 100 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "bar@bar@bar", true}, - /* 101 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar@bar", false}, - /* 102 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar", false}, - /* 103 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@foo", true}, - /* 104 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@foo", false}, - /* 105 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@bar", false}, - /* 106 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.foo", true}, - /* 107 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.bar", false}, - /* 108 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.foo", false}, - /* 109 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.bar", false}, - } - - if len(testCases) != 110 { + /* 84 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, "*.fud.bar.foo", false}, // glob domain allows wildcard of subdomains + /* 85 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, "*.bar.foo", false}, // glob domain 
allows wildcards of subdomains + /* 86 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "*.fud.bar.foo", false}, + /* 87 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "*.bar.foo", false}, + /* 88 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "*.foo", false}, + /* 89 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "x*x.foo", false}, + /* 90 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "*x.foo", false}, + /* 91 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, "x*.foo", false}, + /* 92 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MFalse, "x*x.foo", false}, + /* 93 */ {[]string{"*.foo"}, MFalse, MFalse, MAny, MAny, MAny, "*.foo", false}, // Bare and globs forbidden despite (potentially) allowing wildcards. + /* 94 */ {[]string{"x.*.x.foo"}, MAny, MAny, MAny, MAny, MAny, "x.*.x.foo", false}, // Does not conform to RFC 6125 + } + + if len(testCases) != 95 { t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) } @@ -4507,7 +4383,7 @@ func TestBackend_Roles_IssuanceRegression(t *testing.T) { tested += RoleIssuanceRegressionHelper(t, b, s, index, test) } - t.Logf("Issuance regression expanded matrix test scenarios: %d", tested) + t.Log(fmt.Sprintf("Issuance regression expanded matrix test scenarios: %d", tested)) } type KeySizeRegression struct { @@ -4517,7 +4393,6 @@ type KeySizeRegression struct { // Signature Bits presently is only specified on the role. RoleSignatureBits []int - RoleUsePSS bool // These are tuples; must be of the same length. 
TestKeyTypes []string @@ -4557,11 +4432,10 @@ func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, in for _, roleKeyBits := range test.RoleKeyBits { for _, roleSignatureBits := range test.RoleSignatureBits { role := fmt.Sprintf("key-size-regression-%d-keytype-%v-keybits-%d-signature-bits-%d", index, test.RoleKeyType, roleKeyBits, roleSignatureBits) - _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ + resp, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ "key_type": test.RoleKeyType, "key_bits": roleKeyBits, "signature_bits": roleSignatureBits, - "use_pss": test.RoleUsePSS, }) if err != nil { t.Fatal(err) @@ -4587,15 +4461,6 @@ func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, in t.Fatalf("key size regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, caKeyType: %v, caKeyBits: %v, role: %v, keyType: %v, keyBits: %v", index, haveErr, test.ExpectError, err, resp, test, caKeyType, caKeyBits, role, keyType, keyBits) } - if resp != nil && test.RoleUsePSS && caKeyType == "rsa" { - leafCert := parseCert(t, resp.Data["certificate"].(string)) - switch leafCert.SignatureAlgorithm { - case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS: - default: - t.Fatalf("key size regression test [%d] failed on role %v: unexpected signature algorithm; expected RSA-type CA to sign a leaf cert with PSS algorithm; got %v", index, role, leafCert.SignatureAlgorithm.String()) - } - } - tested += 1 } } @@ -4612,46 +4477,40 @@ func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, in } func TestBackend_Roles_KeySizeRegression(t *testing.T) { - t.Parallel() // Regression testing of role's issuance policy. testCases := []KeySizeRegression{ // RSA with default parameters should fail to issue smaller RSA keys // and any size ECDSA/Ed25519 keys. 
- /* 0 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{1024, 224, 256, 384, 521, 0}, true}, + /* 0 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, []string{"rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{1024, 224, 256, 384, 521, 0}, true}, // But it should work to issue larger RSA keys. - /* 1 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{2048, 3072}, false}, + /* 1 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, []string{"rsa", "rsa"}, []int{2048, 3072}, false}, // EC with default parameters should fail to issue smaller EC keys // and any size RSA/Ed25519 keys. - /* 2 */ {"ec", []int{0}, []int{0}, false, []string{"rsa", "ec", "ed25519"}, []int{2048, 224, 0}, true}, + /* 2 */ {"ec", []int{0}, []int{0}, []string{"rsa", "ec", "ed25519"}, []int{2048, 224, 0}, true}, // But it should work to issue larger EC keys. Note that we should be // independent of signature bits as that's computed from the issuer // type (for EC based issuers). - /* 3 */ {"ec", []int{224}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec", "ec"}, []int{224, 256, 384, 521}, false}, - /* 4 */ {"ec", []int{0, 256}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec"}, []int{256, 384, 521}, false}, - /* 5 */ {"ec", []int{384}, []int{0, 256, 384, 521}, false, []string{"ec", "ec"}, []int{384, 521}, false}, - /* 6 */ {"ec", []int{521}, []int{0, 256, 384, 512}, false, []string{"ec"}, []int{521}, false}, + /* 3 */ {"ec", []int{224}, []int{0, 256, 384, 521}, []string{"ec", "ec", "ec", "ec"}, []int{224, 256, 384, 521}, false}, + /* 4 */ {"ec", []int{0, 256}, []int{0, 256, 384, 521}, []string{"ec", "ec", "ec"}, []int{256, 384, 521}, false}, + /* 5 */ {"ec", []int{384}, []int{0, 256, 384, 521}, []string{"ec", "ec"}, []int{384, 521}, false}, + /* 6 */ {"ec", []int{521}, []int{0, 256, 384, 512}, []string{"ec"}, []int{521}, false}, // Ed25519 should reject RSA and EC keys. 
- /* 7 */ {"ed25519", []int{0}, []int{0}, false, []string{"rsa", "ec", "ec"}, []int{2048, 256, 521}, true}, + /* 7 */ {"ed25519", []int{0}, []int{0}, []string{"rsa", "ec", "ec"}, []int{2048, 256, 521}, true}, // But it should work to issue Ed25519 keys. - /* 8 */ {"ed25519", []int{0}, []int{0}, false, []string{"ed25519"}, []int{0}, false}, + /* 8 */ {"ed25519", []int{0}, []int{0}, []string{"ed25519"}, []int{0}, false}, // Any key type should reject insecure RSA key sizes. - /* 9 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{512, 1024}, true}, + /* 9 */ {"any", []int{0}, []int{0, 256, 384, 512}, []string{"rsa", "rsa"}, []int{512, 1024}, true}, // But work for everything else. - /* 10 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{2048, 3072, 224, 256, 384, 521, 0}, false}, + /* 10 */ {"any", []int{0}, []int{0, 256, 384, 512}, []string{"rsa", "rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{2048, 3072, 224, 256, 384, 521, 0}, false}, // RSA with larger than default key size should reject smaller ones. - /* 11 */ {"rsa", []int{3072}, []int{0, 256, 384, 512}, false, []string{"rsa"}, []int{2048}, true}, - - // We should be able to sign with PSS with any CA key type. 
- /* 12 */ {"rsa", []int{0}, []int{0, 256, 384, 512}, true, []string{"rsa"}, []int{2048}, false}, - /* 13 */ {"ec", []int{0}, []int{0}, true, []string{"ec"}, []int{256}, false}, - /* 14 */ {"ed25519", []int{0}, []int{0}, true, []string{"ed25519"}, []int{0}, false}, + /* 11 */ {"rsa", []int{3072}, []int{0, 256, 384, 512}, []string{"rsa"}, []int{2048}, true}, } - if len(testCases) != 15 { + if len(testCases) != 12 { t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) } @@ -4662,11 +4521,10 @@ func TestBackend_Roles_KeySizeRegression(t *testing.T) { tested += RoleKeySizeRegressionHelper(t, b, s, index, test) } - t.Logf("Key size regression expanded matrix test scenarios: %d", tested) + t.Log(fmt.Sprintf("Key size regression expanded matrix test scenarios: %d", tested)) } func TestRootWithExistingKey(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) var err error @@ -4799,7 +4657,6 @@ func TestRootWithExistingKey(t *testing.T) { } func TestIntermediateWithExistingKey(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) var err error @@ -4864,7 +4721,6 @@ func TestIntermediateWithExistingKey(t *testing.T) { } func TestIssuanceTTLs(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -4921,7 +4777,7 @@ func TestIssuanceTTLs(t *testing.T) { // Sleep until the parent cert expires and the clock rolls over // to the next second. 
- time.Sleep(time.Until(rootCert.NotAfter) + (1500 * time.Millisecond)) + time.Sleep(rootCert.NotAfter.Sub(time.Now()) + (1500 * time.Millisecond)) resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ "issuer_name": "root", @@ -4939,7 +4795,6 @@ func TestIssuanceTTLs(t *testing.T) { } func TestSealWrappedStorageConfigured(t *testing.T) { - t.Parallel() b, _ := createBackendWithStorage(t) wrappedEntries := b.Backend.PathsSpecial.SealWrapStorage @@ -4979,860 +4834,6 @@ AwEHoUQDQgAE57NX8bR/nDoW8yRgLswoXBQcjHrdyfuHS0gPwki6BNnfunUzryVb require.Equal(t, len(importedIssuers), 0) } -func TestPerIssuerAIA(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - - // Generating a root without anything should not have AIAs. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootCert := parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, rootCert.OCSPServer) - require.Empty(t, rootCert.IssuingCertificateURL) - require.Empty(t, rootCert.CRLDistributionPoints) - - // Set some local URLs on the issuer. - _, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": []string{"https://google.com"}, - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "ec", - }) - require.NoError(t, err) - - // Issue something with this re-configured issuer. 
- resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert := parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, leafCert.OCSPServer) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) - require.Empty(t, leafCert.CRLDistributionPoints) - - // Set global URLs and ensure they don't appear on this issuer's leaf. - _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "issuing_certificates": []string{"https://example.com/ca", "https://backup.example.com/ca"}, - "crl_distribution_points": []string{"https://example.com/crl", "https://backup.example.com/crl"}, - "ocsp_servers": []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert = parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, leafCert.OCSPServer) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) - require.Empty(t, leafCert.CRLDistributionPoints) - - // Now come back and remove the local modifications and ensure we get - // the defaults again. 
- _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": []string{}, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert = parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://example.com/ca", "https://backup.example.com/ca"}) - require.Equal(t, leafCert.OCSPServer, []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}) - require.Equal(t, leafCert.CRLDistributionPoints, []string{"https://example.com/crl", "https://backup.example.com/crl"}) -} - -func TestIssuersWithoutCRLBits(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - - // Importing a root without CRL signing bits should work fine. - customBundleWithoutCRLBits := ` ------BEGIN CERTIFICATE----- -MIIDGTCCAgGgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 -LW5ldzAeFw0yMjA4MjQxMjEzNTVaFw0yMzA5MDMxMjEzNTVaMBMxETAPBgNVBAMM -CHJvb3QtbmV3MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojTA/Mx7 -LVW/Zgn/N4BqZbaF82MrTIBFug3ob7mqycNRlWp4/PH8v37+jYn8e691HUsKjden -rDTrO06kiQKiJinAzmlLJvgcazE3aXoh7wSzVG9lFHYvljEmVj+yDbkeaqaCktup -skuNjxCoN9BLmKzZIwVCHn92ZHlhN6LI7CNaU3SDJdu7VftWF9Ugzt9FIvI+6Gcn -/WNE9FWvZ9o7035rZ+1vvTn7/tgxrj2k3XvD51Kq4tsSbqjnSf3QieXT6E6uvtUE -TbPp3xjBElgBCKmeogR1l28rs1aujqqwzZ0B/zOeF8ptaH0aZOIBsVDJR8yTwHzq -s34hNdNfKLHzOwIDAQABo3gwdjAdBgNVHQ4EFgQUF4djNmx+1+uJINhZ82pN+7jz -H8EwHwYDVR0jBBgwFoAUF4djNmx+1+uJINhZ82pN+7jzH8EwDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAoQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZI -hvcNAQELBQADggEBAICQovBz4KLWlLmXeZ2Vf6WfQYyGNgGyJa10XNXtWQ5dM2NU -OLAit4x1c2dz+aFocc8ZsX/ikYi/bruT2rsGWqMAGC4at3U4GuaYGO5a6XzMKIDC -nxIlbiO+Pn6Xum7fAqUri7+ZNf/Cygmc5sByi3MAAIkszeObUDZFTJL7gEOuXIMT -rKIXCINq/U+qc7m9AQ8vKhF1Ddj+dLGLzNQ5j3cKfilPs/wRaYqbMQvnmarX+5Cs 
-k1UL6kWSQsiP3+UWaBlcWkmD6oZ3fIG7c0aMxf7RISq1eTAM9XjH3vMxWQJlS5q3 -2weJ2LYoPe/DwX5CijR0IezapBCrin1BscJMLFQ= ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiNMD8zHstVb9m -Cf83gGpltoXzYytMgEW6DehvuarJw1GVanj88fy/fv6Nifx7r3UdSwqN16esNOs7 -TqSJAqImKcDOaUsm+BxrMTdpeiHvBLNUb2UUdi+WMSZWP7INuR5qpoKS26myS42P -EKg30EuYrNkjBUIef3ZkeWE3osjsI1pTdIMl27tV+1YX1SDO30Ui8j7oZyf9Y0T0 -Va9n2jvTfmtn7W+9Ofv+2DGuPaTde8PnUqri2xJuqOdJ/dCJ5dPoTq6+1QRNs+nf -GMESWAEIqZ6iBHWXbyuzVq6OqrDNnQH/M54Xym1ofRpk4gGxUMlHzJPAfOqzfiE1 -018osfM7AgMBAAECggEAAVd6kZZaN69IZITIc1vHRYa2rlZpKS2JP7c8Vd3Z/4Fz -ZZvnJ7LgVAmUYg5WPZ2sOqBNLfKVN/oke5Q0dALgdxYl7dWQIhPjHeRFbZFtjqEV -OXZGBniamMO/HSKGWGrqFf7BM/H7AhClUwQgjnzVSz+B+LJJidM+SVys3n1xuDmC -EP+iOda+bAHqHv/7oCELQKhLmCvPc9v2fDy+180ttdo8EHuxwVnKiyR/ryKFhSyx -K1wgAPQ9jO+V+GESL90rqpX/r501REsIOOpm4orueelHTD4+dnHxvUPqJ++9aYGX -79qBNPPUhxrQI1yoHxwW0cTxW5EqkZ9bT2lSd5rjcQKBgQDNyPBpidkHPrYemQDT -RldtS6FiW/jc1It/CRbjU4A6Gi7s3Cda43pEUObKNLeXMyLQaMf4GbDPDX+eh7B8 -RkUq0Q/N0H4bn1hbxYSUdgv0j/6czpMo6rLcJHGwOTSpHGsNsxSLL7xlpgzuzqrG -FzEgjMA1aD3w8B9+/77AoSLoMQKBgQDJyYMw82+euLYRbR5Wc/SbrWfh2n1Mr2BG -pp1ZNYorXE5CL4ScdLcgH1q/b8r5XGwmhMcpeA+geAAaKmk1CGG+gPLoq20c9Q1Y -Ykq9tUVJasIkelvbb/SPxyjkJdBwylzcPP14IJBsqQM0be+yVqLJJVHSaoKhXZcl -IW2xgCpjKwKBgFpeX5U5P+F6nKebMU2WmlYY3GpBUWxIummzKCX0SV86mFjT5UR4 -mPzfOjqaI/V2M1eqbAZ74bVLjDumAs7QXReMb5BGetrOgxLqDmrT3DQt9/YMkXtq -ddlO984XkRSisjB18BOfhvBsl0lX4I7VKHHO3amWeX0RNgOjc7VMDfRBAoGAWAQH -r1BfvZHACLXZ58fISCdJCqCsysgsbGS8eW77B5LJp+DmLQBT6DUE9j+i/0Wq/ton -rRTrbAkrsj4RicpQKDJCwe4UN+9DlOu6wijRQgbJC/Q7IOoieJxcX7eGxcve2UnZ -HY7GsD7AYRwa02UquCYJHIjM1enmxZFhMW1AD+UCgYEAm4jdNz5e4QjA4AkNF+cB -ZenrAZ0q3NbTyiSsJEAtRe/c5fNFpmXo3mqgCannarREQYYDF0+jpSoTUY8XAc4q -wL7EZNzwxITLqBnnHQbdLdAvYxB43kvWTy+JRK8qY9LAMCCFeDoYwXkWV4Wkx/b0 -TgM7RZnmEjNdeaa4M52o7VY= ------END PRIVATE KEY----- - ` - resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": customBundleWithoutCRLBits, - }) - 
require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["imported_issuers"]) - require.NotEmpty(t, resp.Data["imported_keys"]) - require.NotEmpty(t, resp.Data["mapping"]) - - // Shouldn't have crl-signing on the newly imported issuer's usage. - resp, err = CBRead(b, s, "issuer/default") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["usage"]) - require.NotContains(t, resp.Data["usage"], "crl-signing") - - // Modifying to set CRL should fail. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "usage": "issuing-certificates,crl-signing", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // Modifying to set issuing-certificates and ocsp-signing should succeed. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "usage": "issuing-certificates,ocsp-signing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["usage"]) - require.NotContains(t, resp.Data["usage"], "crl-signing") -} - -func TestBackend_IfModifiedSinceHeaders(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI. - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - // Required to allow the header to be passed through. - PassthroughRequestHeaders: []string{"if-modified-since"}, - AllowedResponseHeaders: []string{"Last-Modified"}, - }, - }) - require.NoError(t, err) - - // Get a time before CA generation. 
Subtract two seconds to ensure - // the value in the seconds field is different than the time the CA - // is actually generated at. - beforeOldCAGeneration := time.Now().Add(-2 * time.Second) - - // Generate an internal CA. This one is the default. - resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - "issuer_name": "old-root", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - - // CA is generated, but give a grace window. - afterOldCAGeneration := time.Now().Add(2 * time.Second) - - // When you _save_ headers, client returns a copy. But when you go to - // reset them, it doesn't create a new copy (and instead directly - // assigns). This means we have to continually refresh our view of the - // last headers, otherwise the headers added after the last set operation - // leak into this copy... Yuck! - lastHeaders := client.Headers() - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/old-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Reading the CA should work, without a header. - resp, err := client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - - // Ensure that the CA is returned correctly if we give it the old time. 
- client.AddHeader("If-Modified-Since", beforeOldCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided if we give it the present time (plus a - // grace window). - client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) - t.Logf("headers: %v", client.Headers()) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait three seconds. This ensures we have adequate grace period - // to distinguish the two cases, even with grace periods. - time.Sleep(3 * time.Second) - - // Generating a second root. This one isn't the default. - beforeNewCAGeneration := time.Now().Add(-2 * time.Second) - - // Generate an internal CA. This one is the default. - _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - "issuer_name": "new-root", - }) - require.NoError(t, err) - - // As above. - afterNewCAGeneration := time.Now().Add(2 * time.Second) - - // New root isn't the default, so it has fewer paths. - for _, path := range []string{"pki/issuer/new-root/json", "pki/issuer/new-root/crl", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Reading the CA should work, without a header. - resp, err := client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - - // Ensure that the CA is returned correctly if we give it the old time. 
- client.AddHeader("If-Modified-Since", beforeNewCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided if we give it the present time (plus a - // grace window). - client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) - t.Logf("headers: %v", client.Headers()) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait three seconds. This ensures we have adequate grace period - // to distinguish the two cases, even with grace periods. - time.Sleep(3 * time.Second) - - // Now swap the default issuers around. - _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ - "default": "new-root", - }) - require.NoError(t, err) - - // Reading both with the last modified date should return new values. - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CA is returned correctly if we give it the old time. 
- client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is returned correctly if we give it the old time. - client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait for things to settle, record the present time, and wait for the - // clock to definitely tick over again. - time.Sleep(2 * time.Second) - preRevocationTimestamp := time.Now() - time.Sleep(2 * time.Second) - - // The above tests should say everything is cached. - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the new time. - client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // We could generate some leaves and verify the revocation updates the - // CRL. But, revoking the issuer behaves the same, so let's do that - // instead. - _, err = client.Logical().Write("pki/issuer/old-root/revoke", map[string]interface{}{}) - require.NoError(t, err) - - // CA should still be valid. 
- for _, path := range []string{"pki/cert/ca", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the old time. - client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // CRL should be invalidated - for _, path := range []string{"pki/cert/crl", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // If we send some time in the future, everything should be cached again! - futureTime := time.Now().Add(30 * time.Second) - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the new time. 
- client.AddHeader("If-Modified-Since", futureTime.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - beforeThreeWaySwap := time.Now().Add(-2 * time.Second) - - // Now, do a three-way swap of names (old->tmp; new->old; tmp->new). This - // should result in all names/CRLs being invalidated. - _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/old-root", map[string]interface{}{ - "issuer_name": "tmp-root", - }) - require.NoError(t, err) - _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/new-root", map[string]interface{}{ - "issuer_name": "old-root", - }) - require.NoError(t, err) - _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/tmp-root", map[string]interface{}{ - "issuer_name": "new-root", - }) - require.NoError(t, err) - - afterThreeWaySwap := time.Now().Add(2 * time.Second) - - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CA is returned if we give it the pre-update time. - client.AddHeader("If-Modified-Since", beforeThreeWaySwap.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided correctly if we give it the after time. 
- client.AddHeader("If-Modified-Since", afterThreeWaySwap.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - time.Sleep(4 * time.Second) - - beforeDeltaRotation := time.Now().Add(-2 * time.Second) - - // Finally, rebuild the delta CRL and ensure that only that is - // invalidated. We first need to enable it though. - _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ - "auto_rebuild": true, - "enable_delta": true, - }) - require.NoError(t, err) - resp, err = client.Logical().Read("pki/crl/rotate-delta") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["success"], true) - - afterDeltaRotation := time.Now().Add(2 * time.Second) - - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl"} { - t.Logf("path: %v", path) - - for _, when := range []time.Time{beforeDeltaRotation, afterDeltaRotation} { - client.AddHeader("If-Modified-Since", when.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - } - - for _, path := range []string{"pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CRL is present if we give it the pre-update time. 
- client.AddHeader("If-Modified-Since", beforeDeltaRotation.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - client.AddHeader("If-Modified-Since", afterDeltaRotation.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } -} - -func TestBackend_InitializeCertificateCounts(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - ctx := context.Background() - - // Set up an Issuer and Role - // We need a root certificate to write/revoke certificates with - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - - // Create a role - _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ - "allowed_domains": "myvault.com", - "allow_bare_domains": true, - "allow_subdomains": true, - "max_ttl": "2h", - }) - if err != nil { - t.Fatal(err) - } - - // Put certificates A, B, C, D, E in backend - var certificates []string = []string{"a", "b", "c", "d", "e"} - serials := make([]string, 5) - for i, cn := range certificates { - resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": cn + ".myvault.com", - }) - if err != nil { - t.Fatal(err) - } - serials[i] = resp.Data["serial_number"].(string) - } - - // Revoke certificates A + B - revocations := serials[0:2] - for _, key := range revocations { - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": key, - }) - if err != nil { - t.Fatal(err) - } - } - - // Assert initialize from clean is correct: - b.initializeStoredCertificateCounts(ctx) - if atomic.LoadUint32(b.certCount) != 
6 { - t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", atomic.LoadUint32(b.certCount)) - } - if atomic.LoadUint32(b.revokedCertCount) != 2 { - t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", atomic.LoadUint32(b.revokedCertCount)) - } - - // Simulates listing while initialize in progress, by "restarting it" - atomic.StoreUint32(b.certCount, 0) - atomic.StoreUint32(b.revokedCertCount, 0) - b.certsCounted.Store(false) - - // Revoke certificates C, D - dirtyRevocations := serials[2:4] - for _, key := range dirtyRevocations { - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": key, - }) - if err != nil { - t.Fatal(err) - } - } - - // Put certificates F, G in the backend - dirtyCertificates := []string{"f", "g"} - for _, cn := range dirtyCertificates { - resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": cn + ".myvault.com", - }) - if err != nil { - t.Fatal(err) - } - } - - // Run initialize - b.initializeStoredCertificateCounts(ctx) - - // Test certificate count - if *(b.certCount) != 8 { - t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", *(b.certCount)) - } - - if *(b.revokedCertCount) != 4 { - t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", *(b.revokedCertCount)) - } - - return -} - -// Verify that our default values are consistent when creating an issuer and when we do an -// empty POST update to it. This will hopefully identify if we have different default values -// for fields across the two APIs. 
-func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") - preUpdateValues := resp.Data - - resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{}) - requireSuccessNonNilResponse(t, resp, err, "failed updating default issuer with no values") - - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") - postUpdateValues := resp.Data - - require.Equal(t, preUpdateValues, postUpdateValues, - "A value was updated based on the empty update of an issuer, "+ - "most likely we have a different set of field parameters across create and update of issuers.") -} - -func TestBackend_VerifyPSSKeysIssuersFailImport(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - - // PKCS8 parsing fails on this key due to rsaPSS OID - rsaOIDKey := ` ------BEGIN PRIVATE KEY----- -MIIEugIBADALBgkqhkiG9w0BAQoEggSmMIIEogIBAAKCAQEAtN0/NPuJHLuyEdBr -tUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16Fl6MRdtUZ/qNS -Vs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKkFW69v8fsXwKE -Bsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmipp0izCsVuQIE -kBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+GvfnFy9AcTdqRe2 -VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITjwFZ28m7kS7kc -OtvHpwIDAQABAoIBAFQxmjbj0RQbG+3HBBzD0CBgUYnu9ZC3vKFVoMriGci6YrVB -FSKU8u5mpkDhpKMWnE6GRdItCvgyg4NSLAZUaIRT4O5ARqwtTDYsobTb2/U+gNnx -5WXKbFpQcK6jIK+ClfNEDjYb8yDPxG0GEsfHrBvqoFy25L1t37N4sWwH7HjJyZIe -Hbqx4NVDur9qgqaUwkfSeufn4ycHqFtkzKNzCUarDkST9cxE6/1AKfhl09PPuMEa -lAY2JLiEplQL5sh9cxG5FObJbutJo5EIhR2OdM0VcPf0MTD9LXKRoGR3SNlG7IlS 
-llJzBjlh4J1ByMX32btKMHzEvlhyrMI90E1SEGECgYEAx1yDQWe4/b1MBqCxA3d0 -20dDmUHSRQFhkd/Mzkl5dPzRkG42W3ryNbMKdeuL0ZgK9AhfaLCjcj1i+44O7dHb -qBTVwfRrer2uoQVCqqJ6z8PGxPJJxTaqh9QuJxkoQ0i43ZNPcjc2M2sWLn+lkkdE -MaGMiyrmjIQEC6tmgCtZ1VUCgYEA6D9xoT9VuAnQjDvW2tO5N2U2H/8ZyRd1pC3z -H1CzjwShhxsP4YOUaVdw59K95JL4SMxSmpRrhthlW3cRaiT/exBcXLEvz0Qu0OhW -a6155ZFjK3UaLDKlwvmtuoAsuAFqX084LO0B1oxvUJESgyPncQ36fv2lZGV7A66z -Uo+BKQsCgYB2yGBMMAjA5nDN4iCV+C7gF+3m+pjWFKSVzcqxfoWndptGeuRYTUDT -TgIFkHqWPwkHrZVrQxOflYPMbi/m8wr1crSKA5+mWi4aMpAuKvERqYxc/B+IKbIh -jAKTuSGMNWAwZP0JCGx65mso+VUleuDe0Wpz4PPM9TuT2GQSKcI0oQKBgHAHcouC -npmo+lU65DgoWzaydrpWdpy+2Tt6AsW/Su4ZIMWoMy/oJaXuzQK2cG0ay/NpxArW -v0uLhNDrDZZzBF3blYIM4nALhr205UMJqjwntnuXACoDwFvdzoShIXEdFa+l6gYZ -yYIxudxWLmTd491wDb5GIgrcvMsY8V1I5dfjAoGAM9g2LtdqgPgK33dCDtZpBm8m -y4ri9PqHxnpps9WJ1dO6MW/YbW+a7vbsmNczdJ6XNLEfy2NWho1dw3xe7ztFVDjF -cWNUzs1+/6aFsi41UX7EFn3zAFhQUPxT59hXspuWuKbRAWc5fMnxbCfI/Cr8wTLJ -E/0kiZ4swUMyI4tYSbM= ------END PRIVATE KEY----- -` - _, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") - - _, err = CBWrite(b, s, "keys/import", map[string]interface{}{ - "key": rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") - - // Importing a cert with rsaPSS OID should also fail - rsaOIDCert := ` ------BEGIN CERTIFICATE----- -MIIDfjCCAjGgAwIBAgIBATBCBgkqhkiG9w0BAQowNaAPMA0GCWCGSAFlAwQCAQUA -oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAQUAogQCAgDeMBMxETAPBgNVBAMM -CHJvb3Qtb2xkMB4XDTIyMDkxNjE0MDEwM1oXDTIzMDkyNjE0MDEwM1owEzERMA8G -A1UEAwwIcm9vdC1vbGQwggEgMAsGCSqGSIb3DQEBCgOCAQ8AMIIBCgKCAQEAtN0/ -NPuJHLuyEdBrtUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16F -l6MRdtUZ/qNSVs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKk -FW69v8fsXwKEBsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmi -pp0izCsVuQIEkBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+Gvf 
-nFy9AcTdqRe2VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITj -wFZ28m7kS7kcOtvHpwIDAQABo3UwczAdBgNVHQ4EFgQUVGkTAUJ8inxIVGBlfxf4 -cDhRSnowHwYDVR0jBBgwFoAUVGkTAUJ8inxIVGBlfxf4cDhRSnowDAYDVR0TBAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAYYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwQgYJKoZI -hvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglghkgB -ZQMEAgEFAKIEAgIA3gOCAQEAQZ3iQ3NjvS4FYJ5WG41huZI0dkvNFNan+ZYWlYHJ -MIQhbFogb/UQB0rlsuldG0+HF1RDXoYNuThfzt5hiBWYEtMBNurezvnOn4DF0hrl -Uk3sBVnvTalVXg+UVjqh9hBGB75JYJl6a5Oa2Zrq++4qGNwjd0FqgnoXzqS5UGuB -TJL8nlnXPuOIK3VHoXEy7l9GtvEzKcys0xa7g1PYpaJ5D2kpbBJmuQGmU6CDcbP+ -m0hI4QDfVfHtnBp2VMCvhj0yzowtwF4BFIhv4EXZBU10mzxVj0zyKKft9++X8auH -nebuK22ZwzbPe4NhOvAdfNDElkrrtGvTnzkDB7ezPYjelA== ------END CERTIFICATE----- -` - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDCert, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert") - - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDKey + "\n" + rsaOIDCert, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key+cert") - - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDCert + "\n" + rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert+key") - - // After all these errors, we should have zero issuers and keys. - resp, err := CBList(b, s, "issuers") - require.NoError(t, err) - require.Equal(t, nil, resp.Data["keys"]) - - resp, err = CBList(b, s, "keys") - require.NoError(t, err) - require.Equal(t, nil, resp.Data["keys"]) - - // If we create a new PSS root, we should be able to issue an intermediate - // under it. 
- resp, err = CBWrite(b, s, "root/generate/exported", map[string]interface{}{ - "use_pss": "true", - "common_name": "root x1 - pss", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["private_key"]) - - resp, err = CBWrite(b, s, "intermediate/generate/exported", map[string]interface{}{ - "use_pss": "true", - "common_name": "int x1 - pss", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["csr"]) - require.NotEmpty(t, resp.Data["private_key"]) - - resp, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ - "use_pss": "true", - "common_name": "int x1 - pss", - "csr": resp.Data["csr"].(string), - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - - resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": resp.Data["certificate"].(string), - }) - require.NoError(t, err) - - // Finally, if we were to take an rsaPSS OID'd CSR and use it against this - // mount, it will fail. - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "any", - }) - require.NoError(t, err) - - // Issuing a leaf from a CSR with rsaPSS OID should fail... 
- rsaOIDCSR := `-----BEGIN CERTIFICATE REQUEST----- -MIICkTCCAUQCAQAwGTEXMBUGA1UEAwwOcmFuY2hlci5teS5vcmcwggEgMAsGCSqG -SIb3DQEBCgOCAQ8AMIIBCgKCAQEAtzHuGEUK55lXI08yp9DXoye9yCZbkJZO+Hej -1TWGEkbX4hzauRJeNp2+wn8xU5y8ITjWSIXEVDHeezosLCSy0Y2QT7/V45zWPUYY -ld0oUnPiwsb9CPFlBRFnX3dO9SS5MONIrNCJGKXmLdF3lgSl8zPT6J/hWM+JBjHO -hBzK6L8IYwmcEujrQfnOnOztzgMEBJtWG8rnI8roz1adpczTddDKGymh2QevjhlL -X9CLeYSSQZInOMsgaDYl98Hn00K5x0CBp8ADzzXtaPSQ9nsnihN8VvZ/wHw6YbBS -BSHa6OD+MrYnw3Sao6/YgBRNT2glIX85uro4ARW9zGB9/748dwIDAQABoAAwQgYJ -KoZIhvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglg -hkgBZQMEAgEFAKIEAgIA3gOCAQEARGAa0HiwzWCpvAdLOVc4/srEyOYFZPLbtv+Y -ezZIaUBNaWhOvkunqpa48avmcbGlji7r6fxJ5sT28lHt7ODWcJfn1XPAnqesXErm -EBuOIhCv6WiwVyGeTVynuHYkHyw3rIL/zU7N8+zIFV2G2M1UAv5D/eyh/74cr9Of -+nvm9jAbkHix8UwOBCFY2LLNl6bXvbIeJEdDOEtA9UmDXs8QGBg4lngyqcE2Z7rz -+5N/x4guMk2FqblbFGiCc5fLB0Gp6lFFOqhX9Q8nLJ6HteV42xGJUUtsFpppNCRm -82dGIH2PTbXZ0k7iAAwLaPjzOv1v58Wq90o35d4iEsOfJ8v98Q== ------END CERTIFICATE REQUEST-----` - - _, err = CBWrite(b, s, "issuer/default/sign/testing", map[string]interface{}{ - "common_name": "example.com", - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - _, err = CBWrite(b, s, "issuer/default/sign-verbatim", map[string]interface{}{ - "common_name": "example.com", - "use_pss": true, - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - _, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ - "common_name": "faulty x1 - pss", - "use_pss": true, - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - // Vault has a weird API for signing self-signed certificates. Ensure - // that doesn't accept rsaPSS OID'd certificates either. - _, err = CBWrite(b, s, "issuer/default/sign-self-issued", map[string]interface{}{ - "use_pss": true, - "certificate": rsaOIDCert, - }) - require.Error(t, err) - - // Issuing a regular leaf should succeed. 
- _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "rsa", - "use_pss": "true", - }) - require.NoError(t, err) - - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "example.com", - "use_pss": "true", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to issue PSS leaf") -} - var ( initTest sync.Once rsaCAKey string diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go index 2c33397114259..ec3220ba37c82 100644 --- a/builtin/logical/pki/ca_test.go +++ b/builtin/logical/pki/ca_test.go @@ -26,7 +26,6 @@ import ( ) func TestBackend_CA_Steps(t *testing.T) { - t.Parallel() var b *backend factory := func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index d2a59f34bdd2f..9499908e3556f 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *roleEntry, errorResp *logical.Response) { +func (b *backend) getGenerationParams(ctx context.Context, storage logical.Storage, data *framework.FieldData) (exported bool, format string, role *roleEntry, errorResp *logical.Response) { exportedStr := data.Get("exported").(string) switch exportedStr { case "exported": @@ -37,8 +37,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte `the "format" path parameter must be "pem", "der", or "pem_bundle"`) return } - - keyType, keyBits, err := sc.getKeyTypeAndBitsForRole(data) + keyType, keyBits, err := getKeyTypeAndBitsForRole(ctx, b, storage, data) if err != nil { errorResp = logical.ErrorResponse(err.Error()) return @@ -49,7 +48,6 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) 
(exporte KeyType: keyType, KeyBits: keyBits, SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), AllowLocalhost: true, AllowAnyName: true, AllowIPSANs: true, @@ -66,7 +64,6 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte StreetAddress: data.Get("street_address").([]string), PostalCode: data.Get("postal_code").([]string), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, - CNValidations: []string{"disabled"}, } *role.AllowWildcardCertificates = true @@ -77,10 +74,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte return } -func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, randomSource io.Reader) (*certutil.ParsedCertBundle, error) { - ctx := sc.Context - b := sc.Backend - +func generateCABundle(ctx context.Context, b *backend, input *inputBundle, data *certutil.CreationBundle, randomSource io.Reader) (*certutil.ParsedCertBundle, error) { if kmsRequested(input) { keyId, err := getManagedKeyId(input.apiData) if err != nil { @@ -95,7 +89,7 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre return nil, err } - keyEntry, err := sc.getExistingKeyFromRef(keyRef) + keyEntry, err := getExistingKeyFromRef(ctx, input.req.Storage, keyRef) if err != nil { return nil, err } @@ -114,10 +108,7 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre return certutil.CreateCertificateWithRandomSource(data, randomSource) } -func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (*certutil.ParsedCSRBundle, error) { - ctx := sc.Context - b := sc.Backend - +func generateCSRBundle(ctx context.Context, b *backend, input *inputBundle, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (*certutil.ParsedCSRBundle, error) { if 
kmsRequested(input) { keyId, err := getManagedKeyId(input.apiData) if err != nil { @@ -133,7 +124,7 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return nil, err } - key, err := sc.getExistingKeyFromRef(keyRef) + key, err := getExistingKeyFromRef(ctx, input.req.Storage, keyRef) if err != nil { return nil, err } @@ -159,7 +150,7 @@ func parseCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) return bundle.ToParsedCertBundle() } -func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (string, int, error) { +func getKeyTypeAndBitsForRole(ctx context.Context, b *backend, storage logical.Storage, data *framework.FieldData) (string, int, error) { exportedStr := data.Get("exported").(string) var keyType string var keyBits int @@ -188,7 +179,7 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s return "", 0, errors.New("unable to determine managed key id" + err.Error()) } - pubKeyManagedKey, err := getManagedKeyPublicKey(sc.Context, sc.Backend, keyId) + pubKeyManagedKey, err := getManagedKeyPublicKey(ctx, b, keyId) if err != nil { return "", 0, errors.New("failed to lookup public key from managed key: " + err.Error()) } @@ -196,7 +187,7 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s } if existingKeyRequestedFromFieldData(data) { - existingPubKey, err := sc.getExistingPublicKey(data) + existingPubKey, err := getExistingPublicKey(ctx, b, storage, data) if err != nil { return "", 0, errors.New("failed to lookup public key from existing key: " + err.Error()) } @@ -207,20 +198,20 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s return string(privateKeyType), keyBits, err } -func (sc *storageContext) getExistingPublicKey(data *framework.FieldData) (crypto.PublicKey, error) { +func getExistingPublicKey(ctx context.Context, b *backend, s logical.Storage, data *framework.FieldData) (crypto.PublicKey, 
error) { keyRef, err := getKeyRefWithErr(data) if err != nil { return nil, err } - id, err := sc.resolveKeyReference(keyRef) + id, err := resolveKeyReference(ctx, s, keyRef) if err != nil { return nil, err } - key, err := sc.fetchKeyById(id) + key, err := fetchKeyById(ctx, s, id) if err != nil { return nil, err } - return getPublicKey(sc.Context, sc.Backend, key) + return getPublicKey(ctx, b, key) } func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.PrivateKeyType, int, error) { @@ -241,12 +232,12 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr return keyType, keyBits, nil } -func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error) { - keyId, err := sc.resolveKeyReference(keyRef) +func getExistingKeyFromRef(ctx context.Context, s logical.Storage, keyRef string) (*keyEntry, error) { + keyId, err := resolveKeyReference(ctx, s, keyRef) if err != nil { return nil, err } - return sc.fetchKeyById(keyId) + return fetchKeyById(ctx, s, keyId) } func existingKeyGeneratorFromBytes(key *keyEntry) certutil.KeyGenerator { diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index 5608310637567..23661422b52ac 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -10,12 +10,10 @@ import ( "crypto/x509/pkix" "encoding/asn1" "encoding/base64" - "encoding/hex" "encoding/pem" "errors" "fmt" "io" - "math/big" "net" "net/url" "regexp" @@ -85,29 +83,29 @@ func getFormat(data *framework.FieldData) string { // fetchCAInfo will fetch the CA info, will return an error if no ca info exists, this does NOT support // loading using the legacyBundleShimID and should be used with care. This should be called only once // within the request path otherwise you run the risk of a race condition with the issuer migration on perf-secondaries. 
-func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) { +func fetchCAInfo(ctx context.Context, b *backend, req *logical.Request, issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) { var issuerId issuerID - if sc.Backend.useLegacyBundleCaStorage() { + if b.useLegacyBundleCaStorage() { // We have not completed the migration so attempt to load the bundle from the legacy location - sc.Backend.Logger().Info("Using legacy CA bundle as PKI migration has not completed.") + b.Logger().Info("Using legacy CA bundle as PKI migration has not completed.") issuerId = legacyBundleShimID } else { var err error - issuerId, err = sc.resolveIssuerReference(issuerRef) + issuerId, err = resolveIssuerReference(ctx, req.Storage, issuerRef) if err != nil { // Usually a bad label from the user or mis-configured default. return nil, errutil.UserError{Err: err.Error()} } } - return sc.fetchCAInfoByIssuerId(issuerId, usage) + return fetchCAInfoByIssuerId(ctx, b, req, issuerId, usage) } // fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. 
// This does support the loading using the legacyBundleShimID -func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerUsage) (*certutil.CAInfoBundle, error) { - entry, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) +func fetchCAInfoByIssuerId(ctx context.Context, b *backend, req *logical.Request, issuerId issuerID, usage issuerUsage) (*certutil.CAInfoBundle, error) { + entry, bundle, err := fetchCertBundleByIssuerId(ctx, req.Storage, issuerId, true) if err != nil { switch err.(type) { case errutil.UserError: @@ -123,7 +121,7 @@ func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerU return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)} } - parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + parsedBundle, err := parseCABundle(ctx, b, bundle) if err != nil { return nil, errutil.InternalError{Err: err.Error()} } @@ -139,10 +137,9 @@ func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerU ParsedCertBundle: *parsedBundle, URLs: nil, LeafNotAfterBehavior: entry.LeafNotAfterBehavior, - RevocationSigAlg: entry.RevocationSigAlg, } - entries, err := entry.GetAIAURLs(sc) + entries, err := getURLs(ctx, req) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } @@ -151,10 +148,6 @@ func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerU return caInfo, nil } -func fetchCertBySerialBigInt(ctx context.Context, b *backend, req *logical.Request, prefix string, serial *big.Int) (*logical.StorageEntry, error) { - return fetchCertBySerial(ctx, b, req, prefix, serialFromBigInt(serial)) -} - // Allows fetching certificates from the backend; it handles the slightly // separate pathing for CRL, and revoked certificates. 
// @@ -166,7 +159,7 @@ func fetchCertBySerial(ctx context.Context, b *backend, req *logical.Request, pr var certEntry *logical.StorageEntry hyphenSerial := normalizeSerial(serial) - colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":") + colonSerial := strings.Replace(strings.ToLower(serial), "-", ":", -1) switch { // Revoked goes first as otherwise crl get hardcoded paths which fail if @@ -174,23 +167,14 @@ func fetchCertBySerial(ctx context.Context, b *backend, req *logical.Request, pr case strings.HasPrefix(prefix, "revoked/"): legacyPath = "revoked/" + colonSerial path = "revoked/" + hyphenSerial - case serial == legacyCRLPath || serial == deltaCRLPath: + case serial == legacyCRLPath: if err = b.crlBuilder.rebuildIfForced(ctx, b, req); err != nil { return nil, err } - sc := b.makeStorageContext(ctx, req.Storage) - path, err = sc.resolveIssuerCRLPath(defaultRef) + path, err = resolveIssuerCRLPath(ctx, b, req.Storage, defaultRef) if err != nil { return nil, err } - - if serial == deltaCRLPath { - if sc.Backend.useLegacyBundleCaStorage() { - return nil, fmt.Errorf("refusing to serve delta CRL with legacy CA bundle") - } - - path += deltaCRLPathSuffix - } default: legacyPath = "certs/" + colonSerial path = "certs/" + hyphenSerial @@ -213,7 +197,7 @@ func fetchCertBySerial(ctx context.Context, b *backend, req *logical.Request, pr } // Retrieve the old-style path. We disregard errors here because they - // always manifest on Windows, and thus the initial check for a revoked + // always manifest on windows, and thus the initial check for a revoked // cert fails would return an error when the cert isn't revoked, preventing // the happy path from working. 
certEntry, _ = req.Storage.Get(ctx, legacyPath) @@ -226,18 +210,10 @@ func fetchCertBySerial(ctx context.Context, b *backend, req *logical.Request, pr // Update old-style paths to new-style paths certEntry.Key = path - certsCounted := b.certsCounted.Load() if err = req.Storage.Put(ctx, certEntry); err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)} } if err = req.Storage.Delete(ctx, legacyPath); err != nil { - // If we fail here, we have an extra (copy) of a cert in storage, add to metrics: - switch { - case strings.HasPrefix(prefix, "revoked/"): - b.incrementTotalRevokedCertificatesCount(certsCounted, path) - default: - b.incrementTotalCertificatesCount(certsCounted, path) - } return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)} } @@ -267,54 +243,6 @@ func validateURISAN(b *backend, data *inputBundle, uri string) bool { return valid } -// Validates a given common name, ensuring it's either an email or a hostname -// after validating it according to the role parameters, or disables -// validation altogether. -func validateCommonName(b *backend, data *inputBundle, name string) string { - isDisabled := len(data.role.CNValidations) == 1 && data.role.CNValidations[0] == "disabled" - if isDisabled { - return "" - } - - if validateNames(b, data, []string{name}) != "" { - return name - } - - // Validations weren't disabled, but the role lacked CN Validations, so - // don't restrict types. This case is hit in certain existing tests. - if len(data.role.CNValidations) == 0 { - return "" - } - - // If there's an at in the data, ensure email type validation is allowed. - // Otherwise, ensure hostname is allowed. 
- if strings.Contains(name, "@") { - var allowsEmails bool - for _, validation := range data.role.CNValidations { - if validation == "email" { - allowsEmails = true - break - } - } - if !allowsEmails { - return name - } - } else { - var allowsHostnames bool - for _, validation := range data.role.CNValidations { - if validation == "hostname" { - allowsHostnames = true - break - } - } - if !allowsHostnames { - return name - } - } - - return "" -} - // Given a set of requested names for a certificate, verifies that all of them // match the various toggles set in the role for controlling issuance. // If one does not pass, it is returned in the string argument. @@ -665,29 +593,27 @@ func validateSerialNumber(data *inputBundle, serialNumber string) string { } } -func generateCert(sc *storageContext, +func generateCert(ctx context.Context, + b *backend, input *inputBundle, caSign *certutil.CAInfoBundle, isCA bool, - randomSource io.Reader) (*certutil.ParsedCertBundle, []string, error, + randomSource io.Reader) (*certutil.ParsedCertBundle, error, ) { - ctx := sc.Context - b := sc.Backend - if input.role == nil { - return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} + return nil, errutil.InternalError{Err: "no role found in data bundle"} } if input.role.KeyType == "rsa" && input.role.KeyBits < 2048 { - return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} + return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} } - data, warnings, err := generateCreationBundle(b, input, caSign, nil) + data, err := generateCreationBundle(b, input, caSign, nil) if err != nil { - return nil, nil, err + return nil, err } if data.Params == nil { - return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } if isCA { @@ -695,11 +621,10 @@ func 
generateCert(sc *storageContext, data.Params.PermittedDNSDomains = input.apiData.Get("permitted_dns_domains").([]string) if data.SigningBundle == nil { - // Generating a self-signed root certificate. Since we have no - // issuer entry yet, we default to the global URLs. - entries, err := getGlobalAIAURLs(ctx, sc.Storage) + // Generating a self-signed root certificate + entries, err := getURLs(ctx, input.req) if err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)} } data.Params.URLs = entries @@ -711,62 +636,57 @@ func generateCert(sc *storageContext, } } - parsedBundle, err := generateCABundle(sc, input, data, randomSource) + parsedBundle, err := generateCABundle(ctx, b, input, data, randomSource) if err != nil { - return nil, nil, err + return nil, err } - return parsedBundle, warnings, nil + return parsedBundle, nil } // N.B.: This is only meant to be used for generating intermediate CAs. // It skips some sanity checks. 
-func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, []string, error) { - b := sc.Backend - - creation, warnings, err := generateCreationBundle(b, input, nil, nil) +func generateIntermediateCSR(ctx context.Context, b *backend, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, error) { + creation, err := generateCreationBundle(b, input, nil, nil) if err != nil { - return nil, nil, err + return nil, err } if creation.Params == nil { - return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } addBasicConstraints := input.apiData != nil && input.apiData.Get("add_basic_constraints").(bool) - parsedBundle, err := generateCSRBundle(sc, input, creation, addBasicConstraints, randomSource) + parsedBundle, err := generateCSRBundle(ctx, b, input, creation, addBasicConstraints, randomSource) if err != nil { - return nil, nil, err + return nil, err } - return parsedBundle, warnings, nil + return parsedBundle, nil } func signCert(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, isCA bool, - useCSRValues bool) (*certutil.ParsedCertBundle, []string, error, + useCSRValues bool) (*certutil.ParsedCertBundle, error, ) { if data.role == nil { - return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} + return nil, errutil.InternalError{Err: "no role found in data bundle"} } csrString := data.apiData.Get("csr").(string) if csrString == "" { - return nil, nil, errutil.UserError{Err: "\"csr\" is empty"} + return nil, errutil.UserError{Err: fmt.Sprintf("\"csr\" is empty")} } - pemBlock, _ := pem.Decode([]byte(csrString)) + pemBytes := []byte(csrString) + pemBlock, pemBytes := pem.Decode(pemBytes) if pemBlock == nil { - return nil, nil, errutil.UserError{Err: "csr contains no data"} + return nil, errutil.UserError{Err: "csr 
contains no data"} } csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} - } - - if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { - return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} + return nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} } // This switch validates that the CSR key type matches the role and sets @@ -778,14 +698,14 @@ func signCert(b *backend, case "rsa": // Verify that the key matches the role type if csr.PublicKeyAlgorithm != x509.RSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "role requires keys of type %s", data.role.KeyType)} } pubKey, ok := csr.PublicKey.(*rsa.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } actualKeyType = "rsa" @@ -793,13 +713,13 @@ func signCert(b *backend, case "ec": // Verify that the key matches the role type if csr.PublicKeyAlgorithm != x509.ECDSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "role requires keys of type %s", data.role.KeyType)} } pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } actualKeyType = "ec" @@ -807,14 +727,14 @@ func signCert(b *backend, case "ed25519": // Verify that the key matches the role type if csr.PublicKeyAlgorithm != x509.Ed25519 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: 
fmt.Sprintf( "role requires keys of type %s", data.role.KeyType)} } _, ok := csr.PublicKey.(ed25519.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } actualKeyType = "ed25519" @@ -826,10 +746,10 @@ func signCert(b *backend, case x509.RSA: pubKey, ok := csr.PublicKey.(*rsa.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } if pubKey.N.BitLen() < 2048 { - return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} + return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} } actualKeyType = "rsa" @@ -837,7 +757,7 @@ func signCert(b *backend, case x509.ECDSA: pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } actualKeyType = "ec" @@ -845,16 +765,16 @@ func signCert(b *backend, case x509.Ed25519: _, ok := csr.PublicKey.(ed25519.PublicKey) if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + return nil, errutil.UserError{Err: "could not parse CSR's public key"} } actualKeyType = "ed25519" actualKeyBits = 0 default: - return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} + return nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} } default: - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", data.role.KeyType)} + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", data.role.KeyType)} } // Before validating key lengths, update our KeyBits/SignatureBits based @@ -873,7 +793,7 @@ func signCert(b *backend, // for signing 
operations if data.role.KeyBits, data.role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength( actualKeyType, 0, data.role.SignatureBits); err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)} + return nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)} } // We're using the KeyBits field as a minimum value below, and P-224 is safe @@ -895,31 +815,31 @@ func signCert(b *backend, // that we always validate both RSA and ECDSA key sizes. if actualKeyType == "rsa" { if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "role requires a minimum of a %d-bit key, but CSR's key is %d bits", data.role.KeyBits, actualKeyBits)} } if actualKeyBits < 2048 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits", actualKeyBits)} } } else if actualKeyType == "ec" { if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "role requires a minimum of a %d-bit key, but CSR's key is %d bits", data.role.KeyBits, actualKeyBits)} } } - creation, warnings, err := generateCreationBundle(b, data, caSign, csr) + creation, err := generateCreationBundle(b, data, caSign, csr) if err != nil { - return nil, nil, err + return nil, err } if creation.Params == nil { - return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + return nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } creation.Params.IsCA = isCA @@ -931,18 +851,17 @@ func signCert(b *backend, parsedBundle, err := certutil.SignCertificate(creation) if err != nil { - return nil, nil, err + return nil, err 
} - return parsedBundle, warnings, nil + return parsedBundle, nil } // otherNameRaw describes a name related to a certificate which is not in one // of the standard name formats. RFC 5280, 4.2.1.6: -// -// OtherName ::= SEQUENCE { -// type-id OBJECT IDENTIFIER, -// value [0] EXPLICIT ANY DEFINED BY type-id } +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } type otherNameRaw struct { TypeID asn1.ObjectIdentifier Value asn1.RawValue @@ -1049,11 +968,10 @@ func forEachSAN(extension []byte, callback func(tag int, data []byte) error) err // generateCreationBundle is a shared function that reads parameters supplied // from the various endpoints and generates a CreationParameters with the // parameters that can be used to issue or sign -func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { +func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, error) { // Read in names -- CN, DNS and email addresses var cn string var ridSerialNumber string - var warnings []string dnsNames := []string{} emailAddresses := []string{} { @@ -1063,7 +981,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if cn == "" { cn = data.apiData.Get("common_name").(string) if cn == "" && data.role.RequireCN { - return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} + return nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} } } @@ -1096,7 +1014,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn ) converted, err := p.ToASCII(cn) if 
err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } if hostnameRegex.MatchString(converted) { dnsNames = append(dnsNames, converted) @@ -1120,7 +1038,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn ) converted, err := p.ToASCII(v) if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } if hostnameRegex.MatchString(converted) { dnsNames = append(dnsNames, converted) @@ -1133,9 +1051,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn // Check the CN. This ensures that the CN is checked even if it's // excluded from SANs. if cn != "" { - badName := validateCommonName(b, data, cn) + badName := validateNames(b, data, []string{cn}) if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "common name %s not allowed by this role", badName)} } } @@ -1143,7 +1061,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if ridSerialNumber != "" { badName := validateSerialNumber(data, ridSerialNumber) if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "serial_number %s not allowed by this role", badName)} } } @@ -1151,13 +1069,13 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn // Check for bad email and/or DNS names badName := validateNames(b, data, dnsNames) if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "subject alternate name %s not allowed by this role", badName)} } badName = validateNames(b, data, emailAddresses) if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "email address %s not allowed by this 
role", badName)} } } @@ -1175,7 +1093,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if data.role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 { others, err := getOtherSANsFromX509Extensions(csr.Extensions) if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} + return nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} } for _, other := range others { otherSANsInput = append(otherSANsInput, other.String()) @@ -1184,17 +1102,17 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if len(otherSANsInput) > 0 { requested, err := parseOtherSANs(otherSANsInput) if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} + return nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} } badOID, badName, err := validateOtherSANs(data, requested) switch { case err != nil: - return nil, nil, errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} case len(badName) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "other SAN %s not allowed for OID %s by this role", badName, badOID)} case len(badOID) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "other SAN OID %s not allowed by this role", badOID)} default: otherSANs = requested @@ -1207,7 +1125,8 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if csr != nil && data.role.UseCSRSANs { if len(csr.IPAddresses) > 0 { if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR"} + return nil, errutil.UserError{Err: fmt.Sprintf( + "IP Subject Alternative 
Names are not allowed in this role, but was provided some via CSR")} } ipAddresses = csr.IPAddresses } @@ -1215,14 +1134,14 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn ipAlt := data.apiData.Get("ip_sans").([]string) if len(ipAlt) > 0 { if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)} } for _, v := range ipAlt { parsedIP := net.ParseIP(v) if parsedIP == nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "the value %q is not a valid IP address", v)} + return nil, errutil.UserError{Err: fmt.Sprintf( + "the value '%s' is not a valid IP address", v)} } ipAddresses = append(ipAddresses, parsedIP) } @@ -1235,8 +1154,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if csr != nil && data.role.UseCSRSANs { if len(csr.URIs) > 0 { if len(data.role.AllowedURISANs) == 0 { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names are not allowed in this role, but were provided via CSR"), } } @@ -1244,8 +1164,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn for _, uri := range csr.URIs { valid := validateURISAN(b, data, uri.String()) if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names were provided via CSR which are not valid for this role"), } } @@ -1256,24 +1177,26 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn uriAlt := data.apiData.Get("uri_sans").([]string) if len(uriAlt) > 0 { if len(data.role.AllowedURISANs) 
== 0 { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names are not allowed in this role, but were provided via the API"), } } for _, uri := range uriAlt { valid := validateURISAN(b, data, uri) if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names were provided via the API which are not valid for this role"), } } parsedURI, err := url.Parse(uri) if parsedURI == nil || err != nil { - return nil, nil, errutil.UserError{ + return nil, errutil.UserError{ Err: fmt.Sprintf( - "the provided URI Subject Alternative Name %q is not a valid URI", uri), + "the provided URI Subject Alternative Name '%s' is not a valid URI", uri), } } @@ -1313,8 +1236,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } if ttl > 0 && notAfterAlt != "" { - return nil, nil, errutil.UserError{ - Err: "Either ttl or not_after should be provided. Both should not be provided in the same request.", + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "Either ttl or not_after should be provided. 
Both should not be provided in the same request."), } } @@ -1333,14 +1257,13 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn maxTTL = b.System().MaxLeaseTTL() } if ttl > maxTTL { - warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) ttl = maxTTL } if notAfterAlt != "" { notAfter, err = time.Parse(time.RFC3339, notAfterAlt) if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} + return nil, errutil.UserError{Err: err.Error()} } } else { notAfter = time.Now().Add(ttl) @@ -1348,7 +1271,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { // If it's not self-signed, verify that the issued certificate // won't be valid past the lifetime of the CA certificate, and - // act accordingly. This is dependent based on the issuer's + // act accordingly. This is dependent based on the issuers's // LeafNotAfterBehavior argument. switch caSign.LeafNotAfterBehavior { case certutil.PermitNotAfterBehavior: @@ -1358,33 +1281,12 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn case certutil.ErrNotAfterBehavior: fallthrough default: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( + return nil, errutil.UserError{Err: fmt.Sprintf( "cannot satisfy request, as TTL would result in notAfter %s that is beyond the expiration of the CA certificate at %s", notAfter.Format(time.RFC3339Nano), caSign.Certificate.NotAfter.Format(time.RFC3339Nano))} } } } - // Parse SKID from the request for cross-signing. - var skid []byte - { - if rawSKIDValue, ok := data.apiData.GetOk("skid"); ok { - // Handle removing common separators to make copy/paste from tool - // output easier. Chromium uses space, OpenSSL uses colons, and at - // one point, Vault had preferred dash as a separator for hex - // strings. 
- var err error - skidValue := rawSKIDValue.(string) - for _, separator := range []string{":", "-", " "} { - skidValue = strings.ReplaceAll(skidValue, separator, "") - } - - skid, err = hex.DecodeString(skidValue) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} - } - } - } - creation := &certutil.CreationBundle{ Params: &certutil.CreationParameters{ Subject: subject, @@ -1396,7 +1298,6 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn KeyType: data.role.KeyType, KeyBits: data.role.KeyBits, SignatureBits: data.role.SignatureBits, - UsePSS: data.role.UsePSS, NotAfter: notAfter, KeyUsage: x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)), ExtKeyUsage: parseExtKeyUsages(data.role), @@ -1405,7 +1306,6 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA, NotBeforeDuration: data.role.NotBeforeDuration, ForceAppendCaChain: caSign != nil, - SKID: skid, }, SigningBundle: caSign, CSR: csr, @@ -1414,10 +1314,10 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn // Don't deal with URLs or max path length if it's self-signed, as these // normally come from the signing bundle if caSign == nil { - return creation, warnings, nil + return creation, nil } - // This will have been read in from the getGlobalAIAURLs function + // This will have been read in from the getURLs function creation.Params.URLs = caSign.URLs // If the max path length in the role is not nil, it was specified at @@ -1440,7 +1340,7 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } } - return creation, warnings, nil + return creation, nil } func convertRespToPKCS8(resp *logical.Response) error { @@ -1523,7 +1423,9 @@ func handleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) e return err } if 
len(certTemplate.ExtraExtensions) > 0 { - in.ExtraExtensions = append(in.ExtraExtensions, certTemplate.ExtraExtensions...) + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } } return nil } @@ -1600,7 +1502,7 @@ const ( ) // Note: Taken from the Go source code since it's not public, plus changed to not marshal -// marshalSANs marshals a list of addresses into the contents of an X.509 +// marshalSANs marshals a list of addresses into a the contents of an X.509 // SubjectAlternativeName extension. func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { var rawValues []asn1.RawValue diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index ba2e6f8106116..a631323724eaa 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -12,7 +12,6 @@ import ( ) func TestPki_FetchCertBySerial(t *testing.T) { - t.Parallel() b, storage := createBackendWithStorage(t) cases := map[string]struct { @@ -92,7 +91,6 @@ func TestPki_FetchCertBySerial(t *testing.T) { // Demonstrate that multiple OUs in the name are handled in an // order-preserving way. 
func TestPki_MultipleOUs(t *testing.T) { - t.Parallel() var b backend fields := addCACommonFields(map[string]*framework.FieldSchema{}) @@ -110,7 +108,7 @@ func TestPki_MultipleOUs(t *testing.T) { OU: []string{"Z", "E", "V"}, }, } - cb, _, err := generateCreationBundle(&b, input, nil, nil) + cb, err := generateCreationBundle(&b, input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } @@ -124,7 +122,6 @@ func TestPki_MultipleOUs(t *testing.T) { } func TestPki_PermitFQDNs(t *testing.T) { - t.Parallel() var b backend fields := addCACommonFields(map[string]*framework.FieldSchema{}) @@ -209,10 +206,8 @@ func TestPki_PermitFQDNs(t *testing.T) { } for name, testCase := range cases { - name := name - testCase := testCase t.Run(name, func(t *testing.T) { - cb, _, err := generateCreationBundle(&b, testCase.input, nil, nil) + cb, err := generateCreationBundle(&b, testCase.input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } diff --git a/builtin/logical/pki/chain_test.go b/builtin/logical/pki/chain_test.go index 746a378cbaf47..2338708c9fbad 100644 --- a/builtin/logical/pki/chain_test.go +++ b/builtin/logical/pki/chain_test.go @@ -1,11 +1,9 @@ package pki import ( - "bytes" "context" "crypto/x509" "crypto/x509/pkix" - "encoding/hex" "encoding/pem" "fmt" "strconv" @@ -115,7 +113,6 @@ type CBGenerateIntermediate struct { Existing bool Name string CommonName string - SKID string Parent string ImportErrorMessage string } @@ -154,14 +151,6 @@ func (c CBGenerateIntermediate) Run(t testing.TB, b *backend, s logical.Storage, if len(c.CommonName) > 0 { data["common_name"] = c.CommonName } - if len(c.SKID) > 0 { - // Copy the SKID from an existing, already-issued cert. 
- otherPEM := knownCerts[c.SKID] - otherCert := ToCertificate(t, otherPEM) - - data["skid"] = hex.EncodeToString(otherCert.SubjectKeyId) - } - resp, err = CBWrite(b, s, url, data) if err != nil { t.Fatalf("failed to sign CSR for issuer (%v): %v / body: %v", c.Name, err, data) @@ -169,17 +158,6 @@ func (c CBGenerateIntermediate) Run(t testing.TB, b *backend, s logical.Storage, knownCerts[c.Name] = strings.TrimSpace(resp.Data["certificate"].(string)) - // Verify SKID if one was requested. - if len(c.SKID) > 0 { - otherPEM := knownCerts[c.SKID] - otherCert := ToCertificate(t, otherPEM) - ourCert := ToCertificate(t, knownCerts[c.Name]) - - if !bytes.Equal(otherCert.SubjectKeyId, ourCert.SubjectKeyId) { - t.Fatalf("Expected two certs to have equal SKIDs but differed: them: %v vs us: %v", otherCert.SubjectKeyId, ourCert.SubjectKeyId) - } - } - // Set the signed intermediate url = "intermediate/set-signed" data = make(map[string]interface{}) @@ -535,7 +513,7 @@ func (c CBIssueLeaf) RevokeLeaf(t testing.TB, b *backend, s logical.Storage, kno t.Fatalf("failed to revoke issued certificate (%v) under role %v / issuer %v: expected response parameter revocation_time was missing from response:\n%v", api_serial, c.Role, c.Issuer, resp.Data) } - if !hasCRL { + if !hasCRL && isDefault { // Nothing further we can test here. We could re-enable CRL building // and check that it works, but that seems like a stretch. 
Other // issuers might be functionally the same as this issuer (and thus @@ -614,7 +592,7 @@ func (c CBIssueLeaf) RevokeLeaf(t testing.TB, b *backend, s logical.Storage, kno } } - t.Fatalf("expected to find certificate with serial [%v] on issuer %v's CRL but was missing: %v revoked certs\n\nCRL:\n[%v]\n\nLeaf:\n[%v]\n\nIssuer (hasCRL: %v):\n[%v]\n", api_serial, c.Issuer, len(crl.TBSCertList.RevokedCertificates), raw_crl, raw_cert, hasCRL, raw_issuer) + t.Fatalf("expected to find certificate with serial [%v] on issuer %v's CRL but was missing: %v revoked certs\n\nCRL:\n[%v]\n\nLeaf:\n[%v]\n\nIssuer:\n[%v]\n", api_serial, c.Issuer, len(crl.TBSCertList.RevokedCertificates), raw_crl, raw_cert, raw_issuer) } } @@ -851,7 +829,6 @@ var chainBuildingTestCases = []CBTestScenario{ Existing: true, Name: "cross-old-new", CommonName: "root-new", - SKID: "root-new-a", // Which old issuer is used here doesn't matter as they have // the same CN and key. Parent: "root-old-a", @@ -910,7 +887,6 @@ var chainBuildingTestCases = []CBTestScenario{ Existing: true, Name: "cross-new-old", CommonName: "root-old", - SKID: "root-old-a", // Which new issuer is used here doesn't matter as they have // the same CN and key. Parent: "root-new-a", @@ -1596,7 +1572,6 @@ var chainBuildingTestCases = []CBTestScenario{ } func Test_CAChainBuilding(t *testing.T) { - t.Parallel() for testIndex, testCase := range chainBuildingTestCases { b, s := createBackendWithStorage(t) @@ -1630,10 +1605,9 @@ func BenchmarkChainBuilding(benchies *testing.B) { // Run the benchmark. 
ctx := context.Background() - sc := b.makeStorageContext(ctx, s) bench.StartTimer() for n := 0; n < bench.N; n++ { - sc.rebuildIssuersChains(nil) + rebuildIssuersChains(ctx, s, nil) } }) } diff --git a/builtin/logical/pki/chain_util.go b/builtin/logical/pki/chain_util.go index 334000a7c255e..8774137ffce12 100644 --- a/builtin/logical/pki/chain_util.go +++ b/builtin/logical/pki/chain_util.go @@ -2,11 +2,12 @@ package pki import ( "bytes" + "context" "crypto/x509" "fmt" "sort" - "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" ) func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) string { @@ -17,7 +18,7 @@ func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) s return "[" + string(issuer) + "]" } -func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* optional */) error { +func rebuildIssuersChains(ctx context.Context, s logical.Storage, referenceCert *issuerEntry /* optional */) error { // This function rebuilds the CAChain field of all known issuers. This // function should usually be invoked when a new issuer is added to the // pool of issuers. @@ -41,7 +42,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // themselves. // // To begin, we fetch all known issuers from disk. - issuers, err := sc.listIssuers() + issuers, err := listIssuers(ctx, s) if err != nil { return fmt.Errorf("unable to list issuers to build chain: %v", err) } @@ -57,7 +58,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // Otherwise, the only entry in the chain (that we know about) is the // certificate itself. referenceCert.CAChain = []string{referenceCert.Certificate} - return sc.writeIssuer(referenceCert) + return writeIssuer(ctx, s, referenceCert) } // Our provided reference cert might not be in the list of issuers. 
In @@ -114,7 +115,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt stored = referenceCert } else { // Otherwise, fetch it from disk. - stored, err = sc.fetchIssuerById(identifier) + stored, err = fetchIssuerById(ctx, s, identifier) if err != nil { return fmt.Errorf("unable to fetch issuer %v to build chain: %v", identifier, err) } @@ -267,7 +268,9 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt continue } - toVisit = append(toVisit, children...) + for _, child := range children { + toVisit = append(toVisit, child) + } } // Setup the toVisit queue. @@ -372,10 +375,6 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt } } - if len(parentCerts) > 1024*1024*1024 { - return errutil.InternalError{Err: fmt.Sprintf("error building certificate chain, %d is too many parent certs", - len(parentCerts))} - } includedParentCerts := make(map[string]bool, len(parentCerts)+1) includedParentCerts[entry.Certificate] = true for _, parentCert := range append(roots, intermediates...) { @@ -420,7 +419,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt for _, issuer := range issuers { entry := issuerIdEntryMap[issuer] - err := sc.writeIssuer(entry) + err := writeIssuer(ctx, s, entry) if err != nil { pretty := prettyIssuer(issuerIdEntryMap, issuer) return fmt.Errorf("failed to persist issuer (%v) chain to disk: %v", pretty, err) @@ -580,7 +579,9 @@ func processAnyCliqueOrCycle( continue } - cliquesToProcess = append(cliquesToProcess, children...) + for _, child := range children { + cliquesToProcess = append(cliquesToProcess, child) + } // While we're here, add this cycle node to the closure. 
closure[cycleNode] = true @@ -1161,9 +1162,6 @@ func findAllCyclesWithNode( } } - if len(path) > 1024*1024*1024 { - return nil, errutil.InternalError{Err: fmt.Sprintf("Error updating certificate path: path of length %d is too long", len(path))} - } // Make sure to deep copy the path. newPath := make([]issuerID, 0, len(path)+1) newPath = append(newPath, path...) diff --git a/builtin/logical/pki/config_util.go b/builtin/logical/pki/config_util.go index f775b9d58c1f0..6a926c2a3736f 100644 --- a/builtin/logical/pki/config_util.go +++ b/builtin/logical/pki/config_util.go @@ -1,13 +1,14 @@ package pki import ( - "fmt" + "context" "strings" - "time" + + "github.com/hashicorp/vault/sdk/logical" ) -func (sc *storageContext) isDefaultKeySet() (bool, error) { - config, err := sc.getKeysConfig() +func isDefaultKeySet(ctx context.Context, s logical.Storage) (bool, error) { + config, err := getKeysConfig(ctx, s) if err != nil { return false, err } @@ -15,8 +16,8 @@ func (sc *storageContext) isDefaultKeySet() (bool, error) { return strings.TrimSpace(config.DefaultKeyId.String()) != "", nil } -func (sc *storageContext) isDefaultIssuerSet() (bool, error) { - config, err := sc.getIssuersConfig() +func isDefaultIssuerSet(ctx context.Context, s logical.Storage) (bool, error) { + config, err := getIssuersConfig(ctx, s) if err != nil { return false, err } @@ -24,14 +25,14 @@ func (sc *storageContext) isDefaultIssuerSet() (bool, error) { return strings.TrimSpace(config.DefaultIssuerId.String()) != "", nil } -func (sc *storageContext) updateDefaultKeyId(id keyID) error { - config, err := sc.getKeysConfig() +func updateDefaultKeyId(ctx context.Context, s logical.Storage, id keyID) error { + config, err := getKeysConfig(ctx, s) if err != nil { return err } if config.DefaultKeyId != id { - return sc.setKeysConfig(&keyConfigEntry{ + return setKeysConfig(ctx, s, &keyConfigEntry{ DefaultKeyId: id, }) } @@ -39,63 +40,16 @@ func (sc *storageContext) updateDefaultKeyId(id keyID) error { return nil 
} -func (sc *storageContext) updateDefaultIssuerId(id issuerID) error { - config, err := sc.getIssuersConfig() +func updateDefaultIssuerId(ctx context.Context, s logical.Storage, id issuerID) error { + config, err := getIssuersConfig(ctx, s) if err != nil { return err } if config.DefaultIssuerId != id { - oldDefault := config.DefaultIssuerId - newDefault := id - now := time.Now().UTC() - - err := sc.setIssuersConfig(&issuerConfigEntry{ - DefaultIssuerId: newDefault, + return setIssuersConfig(ctx, s, &issuerConfigEntry{ + DefaultIssuerId: id, }) - if err != nil { - return err - } - - // When the default issuer changes, we need to modify four - // pieces of information: - // - // 1. The old default issuer's modification time, as it no - // longer works for the /cert/ca path. - // 2. The new default issuer's modification time, as it now - // works for the /cert/ca path. - // 3. & 4. Both issuer's CRLs, as they behave the same, under - // the /cert/crl path! - for _, thisId := range []issuerID{oldDefault, newDefault} { - if len(thisId) == 0 { - continue - } - - // 1 & 2 above. - issuer, err := sc.fetchIssuerById(thisId) - if err != nil { - return fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %v", thisId, err) - } - - issuer.LastModified = now - err = sc.writeIssuer(issuer) - if err != nil { - return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %v", thisId, err) - } - } - - // Fetch and update the localCRLConfigEntry (3&4). 
- cfg, err := sc.getLocalCRLConfig() - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %v", err) - } - - cfg.LastModified = now - cfg.DeltaLastModified = now - err = sc.setLocalCRLConfig(cfg) - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %v", err) - } } return nil diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go index 962cd650f96fd..eb0d5fe5d7935 100644 --- a/builtin/logical/pki/crl_test.go +++ b/builtin/logical/pki/crl_test.go @@ -2,23 +2,15 @@ package pki import ( "context" - "encoding/asn1" - "encoding/json" - "fmt" "strings" "testing" "time" - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/stretchr/testify/require" ) func TestBackend_CRL_EnableDisableRoot(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -33,175 +25,11 @@ func TestBackend_CRL_EnableDisableRoot(t *testing.T) { crlEnableDisableTestForBackend(t, b, s, []string{caSerial}) } -func TestBackend_CRLConfigUpdate(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - - // Write a legacy config to storage. - type legacyConfig struct { - Expiry string `json:"expiry"` - Disable bool `json:"disable"` - } - oldConfig := legacyConfig{Expiry: "24h", Disable: false} - entry, err := logical.StorageEntryJSON("config/crl", oldConfig) - require.NoError(t, err, "generate storage entry objection with legacy config") - err = s.Put(ctx, entry) - require.NoError(t, err, "failed writing legacy config") - - // Now lets read it. 
- resp, err := CBRead(b, s, "config/crl") - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") - - require.Equal(t, "24h", resp.Data["expiry"]) - require.Equal(t, false, resp.Data["disable"]) - require.Equal(t, defaultCrlConfig.OcspDisable, resp.Data["ocsp_disable"]) - require.Equal(t, defaultCrlConfig.OcspExpiry, resp.Data["ocsp_expiry"]) - require.Equal(t, defaultCrlConfig.AutoRebuild, resp.Data["auto_rebuild"]) - require.Equal(t, defaultCrlConfig.AutoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) -} - -func TestBackend_CRLConfig(t *testing.T) { - t.Parallel() - - tests := []struct { - expiry string - disable bool - ocspDisable bool - ocspExpiry string - autoRebuild bool - autoRebuildGracePeriod string - }{ - {expiry: "24h", disable: true, ocspDisable: true, ocspExpiry: "72h", autoRebuild: false, autoRebuildGracePeriod: "36h"}, - {expiry: "16h", disable: false, ocspDisable: true, ocspExpiry: "0h", autoRebuild: true, autoRebuildGracePeriod: "1h"}, - {expiry: "8h", disable: true, ocspDisable: false, ocspExpiry: "24h", autoRebuild: false, autoRebuildGracePeriod: "24h"}, - } - for _, tc := range tests { - name := fmt.Sprintf("%s-%t-%t", tc.expiry, tc.disable, tc.ocspDisable) - t.Run(name, func(t *testing.T) { - b, s := createBackendWithStorage(t) - - resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ - "expiry": tc.expiry, - "disable": tc.disable, - "ocsp_disable": tc.ocspDisable, - "ocsp_expiry": tc.ocspExpiry, - "auto_rebuild": tc.autoRebuild, - "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, - }) - requireSuccessNilResponse(t, resp, err) - - resp, err = CBRead(b, s, "config/crl") - requireSuccessNonNilResponse(t, resp, err) - requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") - - require.Equal(t, tc.expiry, resp.Data["expiry"]) - require.Equal(t, tc.disable, 
resp.Data["disable"]) - require.Equal(t, tc.ocspDisable, resp.Data["ocsp_disable"]) - require.Equal(t, tc.ocspExpiry, resp.Data["ocsp_expiry"]) - require.Equal(t, tc.autoRebuild, resp.Data["auto_rebuild"]) - require.Equal(t, tc.autoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) - }) - } - - badValueTests := []struct { - expiry string - disable string - ocspDisable string - ocspExpiry string - autoRebuild string - autoRebuildGracePeriod string - }{ - {expiry: "not a duration", disable: "true", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, - {expiry: "16h", disable: "not a boolean", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, - {expiry: "8h", disable: "true", ocspDisable: "not a boolean", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, - {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "not a duration", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, - {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "-1", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, - {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "not a boolean", autoRebuildGracePeriod: "1d"}, - {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "-1", autoRebuild: "true", autoRebuildGracePeriod: "not a duration"}, - } - for _, tc := range badValueTests { - name := fmt.Sprintf("bad-%s-%s-%s", tc.expiry, tc.disable, tc.ocspDisable) - t.Run(name, func(t *testing.T) { - b, s := createBackendWithStorage(t) - - _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ - "expiry": tc.expiry, - "disable": tc.disable, - "ocsp_disable": tc.ocspDisable, - "ocsp_expiry": tc.ocspExpiry, - "auto_rebuild": tc.autoRebuild, - "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, - }) - require.Error(t, err) - }) - } -} - -func TestBackend_CRL_AllKeyTypeSigAlgos(t *testing.T) { - t.Parallel() - - type 
testCase struct { - KeyType string - KeyBits int - SigAlgo string - } - - testCases := []testCase{ - {"rsa", 2048, "SHA256WithRSA"}, - {"rsa", 2048, "SHA384WithRSA"}, - {"rsa", 2048, "SHA512WithRSA"}, - {"rsa", 2048, "SHA256WithRSAPSS"}, - {"rsa", 2048, "SHA384WithRSAPSS"}, - {"rsa", 2048, "SHA512WithRSAPSS"}, - {"ec", 256, "ECDSAWithSHA256"}, - {"ec", 384, "ECDSAWithSHA384"}, - {"ec", 521, "ECDSAWithSHA512"}, - {"ed25519", 0, "PureEd25519"}, - } - - for index, tc := range testCases { - t.Logf("tv %v", index) - b, s := createBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - "key_type": tc.KeyType, - "key_bits": tc.KeyBits, - }) - if err != nil { - t.Fatalf("tc %v: %v", index, err) - } - caSerial := resp.Data["serial_number"].(string) - - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "revocation_signature_algorithm": tc.SigAlgo, - }) - if err != nil { - t.Fatalf("tc %v: %v", index, err) - } - - crlEnableDisableTestForBackend(t, b, s, []string{caSerial}) - - crl := getParsedCrlFromBackend(t, b, s, "crl") - if strings.HasSuffix(tc.SigAlgo, "PSS") { - algo := crl.SignatureAlgorithm - pssOid := asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} - if !algo.Algorithm.Equal(pssOid) { - t.Fatalf("tc %v failed: expected sig-alg to be %v / got %v", index, pssOid, algo) - } - } - } -} - func TestBackend_CRL_EnableDisableIntermediateWithRoot(t *testing.T) { - t.Parallel() crlEnableDisableIntermediateTestForBackend(t, true) } func TestBackend_CRL_EnableDisableIntermediateWithoutRoot(t *testing.T) { - t.Parallel() crlEnableDisableIntermediateTestForBackend(t, false) } @@ -241,7 +69,7 @@ func crlEnableDisableIntermediateTestForBackend(t *testing.T, withRoot bool) { t.Fatal("expected signed intermediate info") } intermediateSignedData := resp.Data - certs := intermediateSignedData["certificate"].(string) + var certs string = 
intermediateSignedData["certificate"].(string) caSerial := intermediateSignedData["serial_number"].(string) caSerials := []string{caSerial} if withRoot { @@ -250,12 +78,10 @@ func crlEnableDisableIntermediateTestForBackend(t *testing.T, withRoot bool) { caSerials = append(caSerials, rootSerial) } - _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ + resp, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ "certificate": certs, }) - if err != nil { - t.Fatal(err) - } + crlEnableDisableTestForBackend(t, b_int, s_int, caSerials) } @@ -293,14 +119,6 @@ func crlEnableDisableTestForBackend(t *testing.T, b *backend, s logical.Storage, for _, serialNum := range expectedSerials { requireSerialNumberInCRL(t, certList, serialNum) } - - // Since this test assumes a complete CRL was rebuilt, we can grab - // the delta CRL and ensure it is empty. - deltaList := getParsedCrlFromBackend(t, b, s, "crl/delta").TBSCertList - lenDeltaList := len(deltaList.RevokedCertificates) - if lenDeltaList != 0 { - t.Fatalf("expected zero revoked certificates on the delta CRL due to complete CRL rebuild, found %d", lenDeltaList) - } } revoke := func(serialIndex int) { @@ -360,14 +178,12 @@ func crlEnableDisableTestForBackend(t *testing.T, b *backend, s logical.Storage, } func TestBackend_Secondary_CRL_Rebuilding(t *testing.T) { - t.Parallel() ctx := context.Background() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) // Write out the issuer/key to storage without going through the api call as replication would. 
bundle := genCertBundle(t, b, s) - issuer, _, err := sc.writeCaBundle(bundle, "", "") + issuer, _, err := writeCaBundle(ctx, b, s, bundle, "", "") require.NoError(t, err) // Just to validate, before we call the invalidate function, make sure our CRL has not been generated @@ -385,18 +201,16 @@ func TestBackend_Secondary_CRL_Rebuilding(t *testing.T) { } func TestCrlRebuilder(t *testing.T) { - t.Parallel() ctx := context.Background() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) // Write out the issuer/key to storage without going through the api call as replication would. bundle := genCertBundle(t, b, s) - _, _, err := sc.writeCaBundle(bundle, "", "") + _, _, err := writeCaBundle(ctx, b, s, bundle, "", "") require.NoError(t, err) req := &logical.Request{Storage: s} - cb := newCRLBuilder(true /* can rebuild and write CRLs */) + cb := crlBuilder{} // Force an initial build err = cb.rebuild(ctx, b, req, true) @@ -414,7 +228,7 @@ func TestCrlRebuilder(t *testing.T) { // Make sure we have ticked over to the next second for { - diff := time.Since(crl1.ThisUpdate) + diff := time.Now().Sub(crl1.ThisUpdate) if diff.Seconds() >= 1 { break } @@ -431,858 +245,6 @@ func TestCrlRebuilder(t *testing.T) { "initial crl time: %#v not before next crl rebuild time: %#v", crl1.ThisUpdate, crl3.ThisUpdate) } -func TestBYOC(t *testing.T) { - t.Parallel() - - b, s := createBackendWithStorage(t) - - // Create a root CA. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - oldRoot := resp.Data["certificate"].(string) - - // Create a role for issuance. 
- _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75s", - "no_store": "true", - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure we can revoke it. - resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": resp.Data["certificate"], - }) - require.NoError(t, err) - - // Issue a second leaf, but hold onto it for now. - resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing2", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - notStoredCert := resp.Data["certificate"].(string) - - // Update the role to make things stored and issue another cert. - _, err = CBWrite(b, s, "roles/stored-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75s", - "no_store": "false", - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure we can revoke it. - resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - storedCert := resp.Data["certificate"].(string) - - // Delete the root and regenerate a new one. - _, err = CBDelete(b, s, "issuer/default") - require.NoError(t, err) - - resp, err = CBList(b, s, "issuers") - require.NoError(t, err) - require.Equal(t, len(resp.Data), 0) - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root2 example.com", - "issuer_name": "root2", - "key_type": "ec", - }) - require.NoError(t, err) - - // Issue a new leaf and revoke that one. 
- resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing3", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": resp.Data["certificate"], - }) - require.NoError(t, err) - - // Now attempt to revoke the earlier leaves. The first should fail since - // we deleted its issuer, but the stored one should succeed. - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": notStoredCert, - }) - require.Error(t, err) - - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": storedCert, - }) - require.NoError(t, err) - - // Import the old root again and revoke the no stored leaf should work. - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": oldRoot, - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "certificate": notStoredCert, - }) - require.NoError(t, err) -} - -func TestPoP(t *testing.T) { - t.Parallel() - - b, s := createBackendWithStorage(t) - - // Create a root CA. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - oldRoot := resp.Data["certificate"].(string) - - // Create a role for issuance. - _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75s", - "no_store": "true", - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure we can revoke it with the private key and - // an explicit certificate. 
- resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing1", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": resp.Data["certificate"], - "private_key": resp.Data["private_key"], - }) - require.NoError(t, err) - - // Issue a second leaf, but hold onto it for now. - resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing2", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - notStoredCert := resp.Data["certificate"].(string) - notStoredKey := resp.Data["private_key"].(string) - - // Update the role to make things stored and issue another cert. - _, err = CBWrite(b, s, "roles/stored-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75s", - "no_store": "false", - }) - require.NoError(t, err) - - // Issue a leaf and ensure we can revoke it via serial number and private key. - resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ - "common_name": "testing3", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["serial_number"]) - require.NotEmpty(t, resp.Data["private_key"]) - - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "serial_number": resp.Data["serial_number"], - "private_key": resp.Data["private_key"], - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure we can revoke it after removing its root; - // hold onto it for now. 
- resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ - "common_name": "testing4", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - storedCert := resp.Data["certificate"].(string) - storedKey := resp.Data["private_key"].(string) - - // Delete the root and regenerate a new one. - _, err = CBDelete(b, s, "issuer/default") - require.NoError(t, err) - - resp, err = CBList(b, s, "issuers") - require.NoError(t, err) - require.Equal(t, len(resp.Data), 0) - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root2 example.com", - "issuer_name": "root2", - "key_type": "ec", - }) - require.NoError(t, err) - - // Issue a new leaf and revoke that one. - resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing5", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": resp.Data["certificate"], - "private_key": resp.Data["private_key"], - }) - require.NoError(t, err) - - // Now attempt to revoke the earlier leaves. The first should fail since - // we deleted its issuer, but the stored one should succeed. - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": notStoredCert, - "private_key": notStoredKey, - }) - require.Error(t, err) - - // Incorrect combination (stored with not stored key) should fail. - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": storedCert, - "private_key": notStoredKey, - }) - require.Error(t, err) - - // Correct combination (stored with stored) should succeed. - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": storedCert, - "private_key": storedKey, - }) - require.NoError(t, err) - - // Import the old root again and revoke the no stored leaf should work. 
- _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": oldRoot, - }) - require.NoError(t, err) - - // Incorrect combination (not stored with stored key) should fail. - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": notStoredCert, - "private_key": storedKey, - }) - require.Error(t, err) - - // Correct combination (not stored with not stored) should succeed. - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ - "certificate": notStoredCert, - "private_key": notStoredKey, - }) - require.NoError(t, err) -} - -func TestIssuerRevocation(t *testing.T) { - t.Parallel() - - b, s := createBackendWithStorage(t) - - // Create a root CA. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["serial_number"]) - // oldRoot := resp.Data["certificate"].(string) - oldRootSerial := resp.Data["serial_number"].(string) - - // Create a second root CA. We'll revoke this one and ensure it - // doesn't appear on the former's CRL. - resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root2 example.com", - "issuer_name": "root2", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["serial_number"]) - // revokedRoot := resp.Data["certificate"].(string) - revokedRootSerial := resp.Data["serial_number"].(string) - - // Shouldn't be able to revoke it by serial number. - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": revokedRootSerial, - }) - require.Error(t, err) - - // Revoke it. 
- resp, err = CBWrite(b, s, "issuer/root2/revoke", map[string]interface{}{}) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotZero(t, resp.Data["revocation_time"]) - - // Regenerate the CRLs - _, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - - // Ensure the old cert isn't on its own CRL. - crl := getParsedCrlFromBackend(t, b, s, "issuer/root2/crl/der") - if requireSerialNumberInCRL(nil, crl.TBSCertList, revokedRootSerial) { - t.Fatalf("the serial number %v should not be on its own CRL as self-CRL appearance should not occur", revokedRootSerial) - } - - // Ensure the old cert isn't on the one's CRL. - crl = getParsedCrlFromBackend(t, b, s, "issuer/root/crl/der") - if requireSerialNumberInCRL(nil, crl.TBSCertList, revokedRootSerial) { - t.Fatalf("the serial number %v should not be on %v's CRL as they're separate roots", revokedRootSerial, oldRootSerial) - } - - // Create a role and ensure we can't use the revoked root. - _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75s", - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure it fails (because the issuer is revoked). - _, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.Error(t, err) - - // Issue an intermediate and ensure we can revoke it. 
- resp, err = CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ - "common_name": "intermediate example.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["csr"]) - intCsr := resp.Data["csr"].(string) - resp, err = CBWrite(b, s, "root/sign-intermediate", map[string]interface{}{ - "ttl": "30h", - "csr": intCsr, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["serial_number"]) - intCert := resp.Data["certificate"].(string) - intCertSerial := resp.Data["serial_number"].(string) - resp, err = CBWrite(b, s, "intermediate/set-signed", map[string]interface{}{ - "certificate": intCert, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["imported_issuers"]) - importedIssuers := resp.Data["imported_issuers"].([]string) - require.Equal(t, len(importedIssuers), 1) - intId := importedIssuers[0] - _, err = CBPatch(b, s, "issuer/"+intId, map[string]interface{}{ - "issuer_name": "int1", - }) - require.NoError(t, err) - - // Now issue a leaf with the intermediate. - resp, err = CBWrite(b, s, "issuer/int1/issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["serial_number"]) - issuedSerial := resp.Data["serial_number"].(string) - - // Now revoke the intermediate. - resp, err = CBWrite(b, s, "issuer/int1/revoke", map[string]interface{}{}) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotZero(t, resp.Data["revocation_time"]) - - // Update the CRLs and ensure it appears. - _, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - crl = getParsedCrlFromBackend(t, b, s, "issuer/root/crl/der") - requireSerialNumberInCRL(t, crl.TBSCertList, intCertSerial) - - // Ensure we can still revoke the issued leaf. 
- resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": issuedSerial, - }) - require.NoError(t, err) - require.NotNil(t, resp) - - // Ensure it appears on the intermediate's CRL. - _, err = CBRead(b, s, "crl/rotate") - require.NoError(t, err) - crl = getParsedCrlFromBackend(t, b, s, "issuer/int1/crl/der") - requireSerialNumberInCRL(t, crl.TBSCertList, issuedSerial) - - // Ensure we can't fetch the intermediate's cert by serial any more. - resp, err = CBRead(b, s, "cert/"+intCertSerial) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["revocation_time"]) -} - -func TestAutoRebuild(t *testing.T) { - t.Parallel() - - // While we'd like to reduce this duration, we need to wait until - // the rollback manager timer ticks. With the new helper, we can - // modify the rollback manager timer period directly, allowing us - // to shorten the total test time significantly. - // - // We set the delta CRL time to ensure it executes prior to the - // main CRL rebuild, and the new CRL doesn't rebuild until after - // we're done. - newPeriod := 1 * time.Second - deltaPeriod := (newPeriod + 1*time.Second).String() - crlTime := (6*newPeriod + 2*time.Second).String() - gracePeriod := (3 * newPeriod).String() - delta := 2 * newPeriod - - // This test requires the periodicFunc to trigger, which requires we stand - // up a full test cluster. - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - // See notes below about usage of /sys/raw for reading cluster - // storage without barrier encryption. 
- EnableRaw: true, - } - cluster := vault.CreateTestClusterWithRollbackPeriod(t, newPeriod, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - require.NoError(t, err) - - // Generate root. - resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["issuer_id"]) - rootIssuer := resp.Data["issuer_id"].(string) - - // Setup a testing role. - _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - - // Regression test: ensure we respond with the default values for CRL - // config when we haven't set any values yet. - resp, err = client.Logical().Read("pki/config/crl") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) - require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) - require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) - require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) - require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) - require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) - require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) - - // Safety guard: we play with rebuild timing below. 
- _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ - "expiry": crlTime, - }) - require.NoError(t, err) - - // Issue a cert and revoke it. It should appear on the CRL right away. - resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ - "common_name": "example.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["serial_number"]) - leafSerial := resp.Data["serial_number"].(string) - - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": leafSerial, - }) - require.NoError(t, err) - - crl := getCrlCertificateList(t, client, "pki") - lastCRLNumber := crl.Version - lastCRLExpiry := crl.NextUpdate - requireSerialNumberInCRL(t, crl, leafSerial) - - // Enable periodic rebuild of the CRL. - _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ - "expiry": crlTime, - "auto_rebuild": true, - "auto_rebuild_grace_period": gracePeriod, - "enable_delta": true, - "delta_rebuild_interval": deltaPeriod, - }) - require.NoError(t, err) - - // Issue a cert and revoke it. - resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ - "common_name": "example.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["serial_number"]) - newLeafSerial := resp.Data["serial_number"].(string) - - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": newLeafSerial, - }) - require.NoError(t, err) - - // Now, we want to test the issuer identification on revocation. This - // only happens as a distinct "step" when CRL building isn't done on - // each revocation. Pull the storage from the cluster (via the sys/raw - // endpoint which requires the mount UUID) and verify the revInfo contains - // a matching issuer. 
- resp, err = client.Logical().Read("sys/mounts/pki") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["uuid"]) - pkiMount := resp.Data["uuid"].(string) - require.NotEmpty(t, pkiMount) - revEntryPath := "logical/" + pkiMount + "/" + revokedPath + normalizeSerial(newLeafSerial) - - // storage from cluster.Core[0] is a physical storage copy, not a logical - // storage. This difference means, if we were to do a storage.Get(...) - // on the above path, we'd read the barrier-encrypted value. This is less - // than useful for decoding, and fetching the proper storage view is a - // touch much work. So, assert EnableRaw above and (ab)use it here. - resp, err = client.Logical().Read("sys/raw/" + revEntryPath) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["value"]) - revEntryValue := resp.Data["value"].(string) - var revInfo revocationInfo - err = json.Unmarshal([]byte(revEntryValue), &revInfo) - require.NoError(t, err) - require.Equal(t, revInfo.CertificateIssuer, issuerID(rootIssuer)) - - // New serial should not appear on CRL. - crl = getCrlCertificateList(t, client, "pki") - thisCRLNumber := crl.Version - requireSerialNumberInCRL(t, crl, leafSerial) // But the old one should. - now := time.Now() - graceInterval, _ := time.ParseDuration(gracePeriod) - expectedUpdate := lastCRLExpiry.Add(-1 * graceInterval) - if requireSerialNumberInCRL(nil, crl, newLeafSerial) { - // If we somehow lagged and we ended up needing to rebuild - // the CRL, we should avoid throwing an error. 
- - if thisCRLNumber == lastCRLNumber { - t.Fatalf("unexpected failure: last (%v) and current (%v) leaf certificate might have the same serial number?", leafSerial, newLeafSerial) - } - - if !now.After(expectedUpdate) { - t.Fatalf("expected newly generated certificate with serial %v not to appear on this CRL but it did, prematurely: %v", newLeafSerial, crl) - } - - t.Fatalf("shouldn't be here") - } - - // This serial should exist in the delta WAL section for the mount... - resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["keys"]) - require.Contains(t, resp.Data["keys"], normalizeSerial(newLeafSerial)) - - haveUpdatedDeltaCRL := false - interruptChan := time.After(4*newPeriod + delta) - for { - if haveUpdatedDeltaCRL { - break - } - - select { - case <-interruptChan: - t.Fatalf("expected to regenerate delta CRL within a couple of periodicFunc invocations (plus %v grace period)", delta) - default: - // Check and see if there's a storage entry for the last rebuild - // serial. If so, validate the delta CRL contains this entry. - resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["keys"]) - - haveRebuildMarker := false - for _, rawEntry := range resp.Data["keys"].([]interface{}) { - entry := rawEntry.(string) - if entry == deltaWALLastRevokedSerialName { - haveRebuildMarker = true - break - } - } - - if !haveRebuildMarker { - time.Sleep(1 * time.Second) - continue - } - - // Read the marker and see if its correct. 
- resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + deltaWALLastBuildSerial) - require.NoError(t, err) - if resp == nil { - time.Sleep(1 * time.Second) - continue - } - - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["value"]) - - // Easy than JSON decoding... - if !strings.Contains(resp.Data["value"].(string), newLeafSerial) { - time.Sleep(1 * time.Second) - continue - } - - haveUpdatedDeltaCRL = true - - // Ensure it has what we want. - deltaCrl := getParsedCrlAtPath(t, client, "/v1/pki/crl/delta").TBSCertList - if !requireSerialNumberInCRL(nil, deltaCrl, newLeafSerial) { - // Check if it is on the main CRL because its already regenerated. - mainCRL := getParsedCrlAtPath(t, client, "/v1/pki/crl").TBSCertList - requireSerialNumberInCRL(t, mainCRL, newLeafSerial) - } - } - } - - // Now, wait until we're within the grace period... Then start prompting - // for regeneration. - if expectedUpdate.After(now) { - time.Sleep(expectedUpdate.Sub(now)) - } - - // Otherwise, the absolute latest we're willing to wait is some delta - // after CRL expiry (to let stuff regenerate &c). - interruptChan = time.After(lastCRLExpiry.Sub(now) + delta) - for { - select { - case <-interruptChan: - t.Fatalf("expected CRL to regenerate prior to CRL expiry (plus %v grace period)", delta) - default: - crl = getCrlCertificateList(t, client, "pki") - if crl.NextUpdate.Equal(lastCRLExpiry) { - // Hack to ensure we got a net-new CRL. If we didn't, we can - // exit this default conditional and wait for the next - // go-round. When the timer fires, it'll populate the channel - // and we'll exit correctly. 
- time.Sleep(1 * time.Second) - break - } - - now := time.Now() - require.True(t, crl.ThisUpdate.Before(now)) - require.True(t, crl.NextUpdate.After(now)) - requireSerialNumberInCRL(t, crl, leafSerial) - requireSerialNumberInCRL(t, crl, newLeafSerial) - return - } - } -} - -func TestTidyIssuerAssociation(t *testing.T) { - t.Parallel() - - b, s := createBackendWithStorage(t) - - // Create a root CA. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["issuer_id"]) - rootCert := resp.Data["certificate"].(string) - rootID := resp.Data["issuer_id"].(issuerID) - - // Create a role for issuance. - _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "ttl": "75m", - }) - require.NoError(t, err) - - // Issue a leaf cert and ensure we can revoke it. - resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data["serial_number"]) - leafSerial := resp.Data["serial_number"].(string) - - _, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": leafSerial, - }) - require.NoError(t, err) - - // This leaf's revInfo entry should have an issuer associated - // with it. - entry, err := s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) - require.NoError(t, err) - require.NotNil(t, entry) - require.NotNil(t, entry.Value) - - var leafInfo revocationInfo - err = entry.DecodeJSON(&leafInfo) - require.NoError(t, err) - require.Equal(t, rootID, leafInfo.CertificateIssuer) - - // Now remove the root and run tidy. 
- _, err = CBDelete(b, s, "issuer/default") - require.NoError(t, err) - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_revoked_cert_issuer_associations": true, - }) - require.NoError(t, err) - - // Wait for tidy to finish. - for { - time.Sleep(125 * time.Millisecond) - - resp, err = CBRead(b, s, "tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - state := resp.Data["state"].(string) - - if state == "Finished" { - break - } - if state == "Error" { - t.Fatalf("unexpected state for tidy operation: Error:\nStatus: %v", resp.Data) - } - } - - // Ensure we don't have an association on this leaf any more. - entry, err = s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) - require.NoError(t, err) - require.NotNil(t, entry) - require.NotNil(t, entry.Value) - - err = entry.DecodeJSON(&leafInfo) - require.NoError(t, err) - require.Empty(t, leafInfo.CertificateIssuer) - - // Now, re-import the root and try again. - resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rootCert, - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotNil(t, resp.Data["imported_issuers"]) - importedIssuers := resp.Data["imported_issuers"].([]string) - require.Equal(t, 1, len(importedIssuers)) - newRootID := importedIssuers[0] - require.NotEmpty(t, newRootID) - - // Re-run tidy... - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_revoked_cert_issuer_associations": true, - }) - require.NoError(t, err) - - // Wait for tidy to finish. 
- for { - time.Sleep(125 * time.Millisecond) - - resp, err = CBRead(b, s, "tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - state := resp.Data["state"].(string) - - if state == "Finished" { - break - } - if state == "Error" { - t.Fatalf("unexpected state for tidy operation: Error:\nStatus: %v", resp.Data) - } - } - - // Finally, double-check we associated things correctly. - entry, err = s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) - require.NoError(t, err) - require.NotNil(t, entry) - require.NotNil(t, entry.Value) - - err = entry.DecodeJSON(&leafInfo) - require.NoError(t, err) - require.Equal(t, newRootID, string(leafInfo.CertificateIssuer)) -} - func requestCrlFromBackend(t *testing.T, s logical.Storage, b *backend) *logical.Response { crlReq := &logical.Request{ Operation: logical.ReadOperation, diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go index 8dda13495f1d2..42bdd8ea48cc5 100644 --- a/builtin/logical/pki/crl_util.go +++ b/builtin/logical/pki/crl_util.go @@ -10,23 +10,17 @@ import ( "math/big" "strings" "sync" + "sync/atomic" "time" - atomic2 "go.uber.org/atomic" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) -const ( - revokedPath = "revoked/" - deltaWALPath = "delta-wal/" - deltaWALLastBuildSerialName = "last-build-serial" - deltaWALLastBuildSerial = deltaWALPath + deltaWALLastBuildSerialName - deltaWALLastRevokedSerialName = "last-revoked-serial" - deltaWALLastRevokedSerial = deltaWALPath + deltaWALLastRevokedSerialName -) +const revokedPath = "revoked/" type revocationInfo struct { CertificateBytes []byte `json:"certificate_bytes"` @@ -35,49 +29,14 @@ type revocationInfo struct { CertificateIssuer issuerID `json:"issuer_id"` } -type ( - // Placeholder in case of migrations needing more 
data. Currently - // we use the path name to store the serial number that was revoked. - deltaWALInfo struct{} - lastWALInfo struct { - // Info to write about the last WAL entry. This is the serial number - // of the last revoked certificate. - // - // We write this below in revokedCert(...) and read it in - // rebuildDeltaCRLsIfForced(...). - Serial string `json:"serial"` - } - lastDeltaInfo struct { - // Info to write about the last built delta CRL. This is the serial - // number of the last revoked certificate that we saw prior to delta - // CRL building. - // - // We write this below in buildAnyCRLs(...) and read it in - // rebuildDeltaCRLsIfForced(...). - Serial string `json:"serial"` - } -) - // crlBuilder is gatekeeper for controlling various read/write operations to the storage of the CRL. // The extra complexity arises from secondary performance clusters seeing various writes to its storage // without the actual API calls. During the storage invalidation process, we do not have the required state // to actually rebuild the CRLs, so we need to schedule it in a deferred fashion. This allows either // read or write calls to perform the operation if required, or have the flag reset upon a write operation -// -// The CRL builder also tracks the revocation configuration. type crlBuilder struct { - _builder sync.Mutex - forceRebuild *atomic2.Bool - canRebuild bool - lastDeltaRebuildCheck time.Time - - _config sync.RWMutex - dirty *atomic2.Bool - config crlConfig - - // Whether to invalidate our LastModifiedTime due to write on the - // global issuance config. 
- invalidate *atomic2.Bool + m sync.Mutex + forceRebuild uint32 } const ( @@ -85,165 +44,10 @@ const ( _enforceForceFlag = false ) -func newCRLBuilder(canRebuild bool) *crlBuilder { - return &crlBuilder{ - forceRebuild: atomic2.NewBool(false), - canRebuild: canRebuild, - // Set the last delta rebuild window to now, delaying the first delta - // rebuild by the first rebuild period to give us some time on startup - // to stabilize. - lastDeltaRebuildCheck: time.Now(), - dirty: atomic2.NewBool(true), - config: defaultCrlConfig, - invalidate: atomic2.NewBool(false), - } -} - -func (cb *crlBuilder) markConfigDirty() { - cb.dirty.Store(true) -} - -func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { - if cb.dirty.Load() { - // Acquire a write lock. - cb._config.Lock() - defer cb._config.Unlock() - - if !cb.dirty.Load() { - // Someone else might've been reloading the config; no need - // to do it twice. - return nil - } - - config, err := sc.getRevocationConfig() - if err != nil { - return err - } - - // Set the default config if none was returned to us. - if config != nil { - cb.config = *config - } else { - cb.config = defaultCrlConfig - } - - // Updated the config; unset dirty. - cb.dirty.Store(false) - } - - return nil -} - -func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { - // Config may mutate immediately after accessing, but will be freshly - // fetched if necessary. - if err := cb.reloadConfigIfRequired(sc); err != nil { - return nil, err - } - - cb._config.RLock() - defer cb._config.RUnlock() - - configCopy := cb.config - return &configCopy, nil -} - -func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { - cfg, err := cb.getConfigWithUpdate(sc) - if err != nil { - return err - } - - if cfg.Disable || !cfg.AutoRebuild || cb.forceRebuild.Load() { - // Not enabled, not on auto-rebuilder, or we're already scheduled to - // rebuild so there's no point to interrogate CRL values... 
- return nil - } - - // Auto-Rebuild is enabled. We need to check each issuer's CRL and see - // if its about to expire. If it is, we've gotta rebuild it (and well, - // every other CRL since we don't have a fine-toothed rebuilder). - // - // We store a list of all (unique) CRLs in the cluster-local CRL - // configuration along with their expiration dates. - crlConfig, err := sc.getLocalCRLConfig() - if err != nil { - return fmt.Errorf("error checking for auto-rebuild status: unable to fetch cluster-local CRL configuration: %v", err) - } - - // If there's no config, assume we've gotta rebuild it to get this - // information. - if crlConfig == nil { - cb.forceRebuild.Store(true) - return nil - } - - // If the map is empty, assume we need to upgrade and schedule a - // rebuild. - if len(crlConfig.CRLExpirationMap) == 0 { - cb.forceRebuild.Store(true) - return nil - } - - // Otherwise, check CRL's expirations and see if its zero or within - // the grace period and act accordingly. - now := time.Now() - - period, err := time.ParseDuration(cfg.AutoRebuildGracePeriod) - if err != nil { - // This may occur if the duration is empty; in that case - // assume the default. The default should be valid and shouldn't - // error. - defaultPeriod, defaultErr := time.ParseDuration(defaultCrlConfig.AutoRebuildGracePeriod) - if defaultErr != nil { - return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %v\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) - } - - period = defaultPeriod - } - - for _, value := range crlConfig.CRLExpirationMap { - if value.IsZero() || now.After(value.Add(-1*period)) { - cb.forceRebuild.Store(true) - return nil - } - } - - return nil -} - -// Mark the internal LastModifiedTime tracker invalid. 
-func (cb *crlBuilder) invalidateCRLBuildTime() { - cb.invalidate.Store(true) -} - -// Update the config to mark the modified CRL. See note in -// updateDefaultIssuerId about why this is necessary. -func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { - if cb.invalidate.CAS(true, false) { - // Flush out our invalidation. - cfg, err := sc.getLocalCRLConfig() - if err != nil { - cb.invalidate.Store(true) - return fmt.Errorf("unable to update local CRL config's modification time: error fetching: %v", err) - } - - cfg.LastModified = time.Now().UTC() - cfg.DeltaLastModified = time.Now().UTC() - err = sc.setLocalCRLConfig(cfg) - if err != nil { - cb.invalidate.Store(true) - return fmt.Errorf("unable to update local CRL config's modification time: error persisting: %v", err) - } - } - - return nil -} - // rebuildIfForced is to be called by readers or periodic functions that might need to trigger // a refresh of the CRL before the read occurs. func (cb *crlBuilder) rebuildIfForced(ctx context.Context, b *backend, request *logical.Request) error { - if cb.forceRebuild.Load() { + if atomic.LoadUint32(&cb.forceRebuild) == 1 { return cb._doRebuild(ctx, b, request, true, _enforceForceFlag) } @@ -260,181 +64,59 @@ func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { // Only schedule us on active nodes, as the active node is the only node that can rebuild/write the CRL. // Note 1: The CRL is cluster specific, so this does need to run on the active node of a performance secondary cluster. // Note 2: This is called by the storage invalidation function, so it should not block. 
- if !cb.canRebuild { + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) || + b.System().ReplicationState().HasState(consts.ReplicationDRSecondary) { b.Logger().Debug("Ignoring request to schedule a CRL rebuild, not on active node.") return } b.Logger().Info("Scheduling PKI CRL rebuild.") // Set the flag to 1, we don't care if we aren't the ones that actually swap it to 1. - cb.forceRebuild.Store(true) + atomic.CompareAndSwapUint32(&cb.forceRebuild, 0, 1) } func (cb *crlBuilder) _doRebuild(ctx context.Context, b *backend, request *logical.Request, forceNew bool, ignoreForceFlag bool) error { - cb._builder.Lock() - defer cb._builder.Unlock() + cb.m.Lock() + defer cb.m.Unlock() // Re-read the lock in case someone beat us to the punch between the previous load op. - forceBuildFlag := cb.forceRebuild.Load() - if forceBuildFlag || ignoreForceFlag { + forceBuildFlag := atomic.LoadUint32(&cb.forceRebuild) + if forceBuildFlag == 1 || ignoreForceFlag { // Reset our original flag back to 0 before we start the rebuilding. This may lead to another round of // CRL building, but we want to avoid the race condition caused by clearing the flag after we completed (An // update/revocation occurred attempting to set the flag, after we listed the certs but before we wrote // the CRL, so we missed the update and cleared the flag.) - cb.forceRebuild.Store(false) + atomic.CompareAndSwapUint32(&cb.forceRebuild, 1, 0) // if forceRebuild was requested, that should force a complete rebuild even if requested not too by forceNew - myForceNew := forceBuildFlag || forceNew + myForceNew := forceBuildFlag == 1 || forceNew return buildCRLs(ctx, b, request, myForceNew) } return nil } -func (cb *crlBuilder) getPresentDeltaWALForClearing(sc *storageContext) ([]string, error) { - // Clearing of the delta WAL occurs after a new complete CRL has been built. 
- walSerials, err := sc.Storage.List(sc.Context, deltaWALPath) - if err != nil { - return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %s", err) - } - - // We _should_ remove the special WAL entries here, but we don't really - // want to traverse the list again (and also below in clearDeltaWAL). So - // trust the latter does the right thing. - return walSerials, nil -} - -func (cb *crlBuilder) clearDeltaWAL(sc *storageContext, walSerials []string) error { - // Clearing of the delta WAL occurs after a new complete CRL has been built. - for _, serial := range walSerials { - // Don't remove our special entries! - if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { - continue - } - - if err := sc.Storage.Delete(sc.Context, deltaWALPath+serial); err != nil { - return fmt.Errorf("error clearing delta WAL certificate: %s", err) - } - } - - return nil -} - -func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) error { - // Delta CRLs use the same expiry duration as the complete CRL. Because - // we always rebuild the complete CRL and then the delta CRL, we can - // be assured that the delta CRL always expires after a complete CRL, - // and that rebuilding the complete CRL will trigger a fresh delta CRL - // build of its own. - // - // This guarantee means we can avoid checking delta CRL expiry. Thus, - // we only need to rebuild the delta CRL when we have new revocations, - // within our time window for updating it. - cfg, err := cb.getConfigWithUpdate(sc) - if err != nil { - return err - } - - if !cfg.EnableDelta { - // We explicitly do not update the last check time here, as we - // want to persist the last rebuild window if it hasn't been set. - return nil - } - - deltaRebuildDuration, err := time.ParseDuration(cfg.DeltaRebuildInterval) - if err != nil { - return err - } - - // Acquire CRL building locks before we get too much further. 
- cb._builder.Lock() - defer cb._builder.Unlock() - - // Last is setup during newCRLBuilder(...), so we don't need to deal with - // a zero condition. - now := time.Now() - last := cb.lastDeltaRebuildCheck - nextRebuildCheck := last.Add(deltaRebuildDuration) - if !override && now.Before(nextRebuildCheck) { - // If we're still before the time of our next rebuild check, we can - // safely return here even if we have certs. We'll wait for a bit, - // retrigger this check, and then do the rebuild. - return nil - } - - // Update our check time. If we bail out below (due to storage errors - // or whatever), we'll delay the next CRL check (hopefully allowing - // things to stabilize). Otherwise, we might not build a new Delta CRL - // until our next complete CRL build. - cb.lastDeltaRebuildCheck = now - - // Fetch two storage entries to see if we actually need to do this - // rebuild, given we're within the window. - lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) - if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) { - // If this entry does not exist, we don't need to rebuild the - // delta WAL due to the expiration assumption above. There must - // not have been any new revocations. Since err should be nil - // in this case, we can safely return it. - return err - } - - lastBuildEntry, err := sc.Storage.Get(sc.Context, deltaWALLastBuildSerial) - if err != nil { - return err - } - - if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil { - // If the last build entry doesn't exist, we still want to build a - // new delta WAL, since this could be our very first time doing so. - // - // Otherwise, here, now that we know it exists, we want to check this - // value against the other value. Since we previously guarded the WAL - // entry being non-empty, we're good to decode everything within this - // guard. 
- var walInfo lastWALInfo - if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return err - } - - var deltaInfo lastDeltaInfo - if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return err - } - - // Here, everything decoded properly and we know that no new certs - // have been revoked since we built this last delta CRL. We can exit - // without rebuilding then. - if walInfo.Serial == deltaInfo.Serial { - return nil - } +// Revokes a cert, and tries to be smart about error recovery +func revokeCert(ctx context.Context, b *backend, req *logical.Request, serial string, fromLease bool) (*logical.Response, error) { + // As this backend is self-contained and this function does not hook into + // third parties to manage users or resources, if the mount is tainted, + // revocation doesn't matter anyways -- the CRL that would be written will + // be immediately blown away by the view being cleared. So we can simply + // fast path a successful exit. + if b.System().Tainted() { + return nil, nil } - // Finally, we must've needed to do the rebuild. Execute! - return cb.rebuildDeltaCRLsHoldingLock(sc, false) -} - -func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) error { - cb._builder.Lock() - defer cb._builder.Unlock() - - return cb.rebuildDeltaCRLsHoldingLock(sc, forceNew) -} - -func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) error { - return buildAnyCRLs(sc, forceNew, true /* building delta */) -} - -// Helper function to fetch a map of issuerID->parsed cert for revocation -// usage. Unlike other paths, this needs to handle the legacy bundle -// more gracefully than rejecting it outright. -func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509.Certificate, error) { + // Validate that no issuers match the serial number to be revoked. 
We need + // to gracefully degrade to the legacy cert bundle when it is required, as + // secondary PR clusters might not have been upgraded, but still need to + // handle revoking certs. var err error var issuers []issuerID - if !sc.Backend.useLegacyBundleCaStorage() { - issuers, err = sc.listIssuers() + if !b.useLegacyBundleCaStorage() { + issuers, err = listIssuers(ctx, req.Storage) if err != nil { - return nil, fmt.Errorf("could not fetch issuers list: %v", err) + return logical.ErrorResponse(fmt.Sprintf("could not fetch issuers list: %v", err)), nil } } else { // Hack: this isn't a real issuerID, but it works for fetchCAInfo @@ -442,18 +124,22 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 issuers = []issuerID{legacyBundleShimID} } - issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) for _, issuer := range issuers { - _, bundle, caErr := sc.fetchCertBundleByIssuerId(issuer, false) + _, bundle, caErr := fetchCertBundleByIssuerId(ctx, req.Storage, issuer, false) if caErr != nil { - return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %s", issuer, caErr) + switch caErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("could not fetch the CA certificate for issuer id %v: %s", issuer, caErr)), nil + default: + return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %s", issuer, caErr) + } } if bundle == nil { return nil, fmt.Errorf("faulty reference: %v - CA info not found", issuer) } - parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + parsedBundle, err := parseCABundle(ctx, b, bundle) if err != nil { return nil, errutil.InternalError{Err: err.Error()} } @@ -462,39 +148,8 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} } - issuerIDCertMap[issuer] = parsedBundle.Certificate - } - - return issuerIDCertMap, nil 
-} - -// Revokes a cert, and tries to be smart about error recovery -func revokeCert(ctx context.Context, b *backend, req *logical.Request, serial string, fromLease bool) (*logical.Response, error) { - // As this backend is self-contained and this function does not hook into - // third parties to manage users or resources, if the mount is tainted, - // revocation doesn't matter anyways -- the CRL that would be written will - // be immediately blown away by the view being cleared. So we can simply - // fast path a successful exit. - if b.System().Tainted() { - return nil, nil - } - - // Validate that no issuers match the serial number to be revoked. We need - // to gracefully degrade to the legacy cert bundle when it is required, as - // secondary PR clusters might not have been upgraded, but still need to - // handle revoking certs. - sc := b.makeStorageContext(ctx, req.Storage) - - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) - if err != nil { - return nil, err - } - - // Ensure we don't revoke an issuer via this API; use /issuer/:issuer_ref/revoke - // instead. - for issuer, certificate := range issuerIDCertMap { - colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":") - if colonSerial == serialFromCert(certificate) { + colonSerial := strings.Replace(strings.ToLower(serial), "-", ":", -1) + if colonSerial == certutil.GetHexFormatted(parsedBundle.Certificate.SerialNumber.Bytes(), ":") { return logical.ErrorResponse(fmt.Sprintf("adding issuer (id: %v) to its own CRL is not allowed", issuer)), nil } } @@ -568,80 +223,24 @@ func revokeCert(ctx context.Context, b *backend, req *logical.Request, serial st revInfo.RevocationTime = currTime.Unix() revInfo.RevocationTimeUTC = currTime.UTC() - // We may not find an issuer with this certificate; that's fine so - // ignore the return value. 
- associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) - revEntry, err = logical.StorageEntryJSON(revokedPath+normalizeSerial(serial), revInfo) if err != nil { return nil, fmt.Errorf("error creating revocation entry") } - certsCounted := b.certsCounted.Load() err = req.Storage.Put(ctx, revEntry) if err != nil { return nil, fmt.Errorf("error saving revoked certificate to new location") } - b.incrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) } - // Fetch the config and see if we need to rebuild the CRL. If we have - // auto building enabled, we will wait for the next rebuild period to - // actually rebuild it. - config, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("error building CRL: while updating config: %v", err) - } - - if !config.AutoRebuild { - // Note that writing the Delta WAL here isn't necessary; we've - // already rebuilt the full CRL so the Delta WAL will be cleared - // afterwards. Writing an entry only to immediately remove it - // isn't necessary. - crlErr := b.crlBuilder.rebuild(ctx, b, req, false) - if crlErr != nil { - switch crlErr.(type) { - case errutil.UserError: - return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil - default: - return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) - } - } - } else if !alreadyRevoked { - // Regardless of whether or not we've presently enabled Delta CRLs, - // we should always write the Delta WAL in case it is enabled in the - // future. We could trigger another full CRL rebuild instead (to avoid - // inconsistent state between the CRL and missing Delta WAL entries), - // but writing extra (unused?) WAL entries versus an expensive full - // CRL rebuild is probably a net wash. - /// - // We should only do this when the cert hasn't already been revoked. 
- // Otherwise, the re-revocation may appear on both an existing CRL and - // on a delta CRL, or a serial may be skipped from the delta CRL if - // there's an A->B->A revocation pattern and the delta was rebuilt - // after the first cert. - // - // Currently we don't store any data in the WAL entry. - var walInfo deltaWALInfo - walEntry, err := logical.StorageEntryJSON(deltaWALPath+normalizeSerial(serial), walInfo) - if err != nil { - return nil, fmt.Errorf("unable to create delta CRL WAL entry") - } - - if err = req.Storage.Put(ctx, walEntry); err != nil { - return nil, fmt.Errorf("error saving delta CRL WAL entry") - } - - // In order for periodic delta rebuild to be mildly efficient, we - // should write the last revoked delta WAL entry so we know if we - // have new revocations that we should rebuild the delta WAL for. - lastRevSerial := lastWALInfo{Serial: serial} - lastWALEntry, err := logical.StorageEntryJSON(deltaWALLastRevokedSerial, lastRevSerial) - if err != nil { - return nil, fmt.Errorf("unable to create last delta CRL WAL entry") - } - if err = req.Storage.Put(ctx, lastWALEntry); err != nil { - return nil, fmt.Errorf("error saving last delta CRL WAL entry") + crlErr := b.crlBuilder.rebuild(ctx, b, req, false) + if crlErr != nil { + switch crlErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil + default: + return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } @@ -657,11 +256,6 @@ func revokeCert(ctx context.Context, b *backend, req *logical.Request, serial st } func buildCRLs(ctx context.Context, b *backend, req *logical.Request, forceNew bool) error { - sc := b.makeStorageContext(ctx, req.Storage) - return buildAnyCRLs(sc, forceNew, false) -} - -func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // In order to build all CRLs, we need knowledge of all issuers. 
Any two // issuers with the same keys _and_ subject should have the same CRL since // they're functionally equivalent. @@ -691,28 +285,8 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { var err error var issuers []issuerID var wasLegacy bool - - // First, fetch an updated copy of the CRL config. We'll pass this into - // buildCRL. - globalCRLConfig, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return fmt.Errorf("error building CRL: while updating config: %v", err) - } - - if globalCRLConfig.Disable && !forceNew { - // We build a single long-lived empty CRL in the event that we disable - // the CRL, but we don't keep updating it with newer, more-valid empty - // CRLs in the event that we later re-enable it. This is a historical - // behavior. - // - // So, since tidy can now associate issuers on revocation entries, we - // can skip the rest of this function and exit early without updating - // anything. - return nil - } - - if !sc.Backend.useLegacyBundleCaStorage() { - issuers, err = sc.listIssuers() + if !b.useLegacyBundleCaStorage() { + issuers, err = listIssuers(ctx, req.Storage) if err != nil { return fmt.Errorf("error building CRL: while listing issuers: %v", err) } @@ -722,17 +296,9 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // below for revocation to handle the legacy bundle. issuers = []issuerID{legacyBundleShimID} wasLegacy = true - - // Here, we avoid building a delta CRL with the legacy CRL bundle. - // - // Users should upgrade symmetrically, rather than attempting - // backward compatibility for new features across disparate versions. 
- if isDelta { - return nil - } } - config, err := sc.getIssuersConfig() + config, err := getIssuersConfig(ctx, req.Storage) if err != nil { return fmt.Errorf("error building CRLs: while getting the default config: %v", err) } @@ -749,7 +315,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { for _, issuer := range issuers { // We don't strictly need this call, but by requesting the bundle, the // legacy path is automatically ignored. - thisEntry, _, err := sc.fetchCertBundleByIssuerId(issuer, false) + thisEntry, _, err := fetchCertBundleByIssuerId(ctx, req.Storage, issuer, false) if err != nil { return fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %v", issuer, err) } @@ -758,20 +324,11 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { continue } - // n.b.: issuer usage check has been delayed. This occurred because - // we want to ensure any issuer (representative of a larger set) can - // be used to associate revocation entries and we won't bother - // rewriting that entry (causing churn) if the particular selected - // issuer lacks CRL signing capabilities. - // - // The result is that this map (and the other maps) contain all the - // issuers we know about, and only later do we check crlSigning before - // choosing our representative. - // - // The other side effect (making this not compatible with Vault 1.11 - // behavior) is that _identified_ certificates whose issuer set is - // not allowed for crlSigning will no longer appear on the default - // issuer's CRL. + // Skip entries which aren't enabled for CRL signing. + if err := thisEntry.EnsureUsage(CRLSigningUsage); err != nil { + continue + } + issuerIDEntryMap[issuer] = thisEntry thisCert, err := thisEntry.GetCertificate() @@ -790,57 +347,20 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Fetch the cluster-local CRL mapping so we know where to write the // CRLs. 
- crlConfig, err := sc.getLocalCRLConfig() + crlConfig, err := getLocalCRLConfig(ctx, req.Storage) if err != nil { return fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %v", err) } - // Before we load cert entries, we want to store the last seen delta WAL - // serial number. The subsequent List will have at LEAST that certificate - // (and potentially more) in it; when we're done writing the delta CRL, - // we'll write this serial as a sentinel to see if we need to rebuild it - // in the future. - var lastDeltaSerial string - if isDelta { - lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) - if err != nil { - return err - } - - if lastWALEntry != nil && lastWALEntry.Value != nil { - var walInfo lastWALInfo - if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return err - } - - lastDeltaSerial = walInfo.Serial - } - } - - // We fetch a list of delta WAL entries prior to generating the complete - // CRL. This allows us to avoid a lock (to clear such storage): anything - // visible now, should also be visible on the complete CRL we're writing. - var currDeltaCerts []string - if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentDeltaWALForClearing(sc) - if err != nil { - return fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %v", err) - } - } - // Next, we load and parse all revoked certificates. We need to assign // these certificates to an issuer. Some certificates will not be // assignable (if they were issued by a since-deleted issuer), so we need // a separate pool for those. 
- unassignedCerts, revokedCertsMap, err := getRevokedCertEntries(sc, issuerIDCertMap, isDelta) + unassignedCerts, revokedCertsMap, err := getRevokedCertEntries(ctx, req, issuerIDCertMap) if err != nil { return fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %v", err) } - if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return fmt.Errorf("error building CRLs: unable to parse revoked issuers: %v", err) - } - // Now we can call buildCRL once, on an arbitrary/representative issuer // from each of these (keyID, subject) sets. for _, subjectIssuersMap := range keySubjectIssuersMap { @@ -850,24 +370,10 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } var revokedCerts []pkix.RevokedCertificate - representative := issuerID("") + representative := issuersSet[0] var crlIdentifier crlID var crlIdIssuer issuerID for _, issuerId := range issuersSet { - // Skip entries which aren't enabled for CRL signing. We don't - // particularly care which issuer is ultimately chosen as the - // set representative for signing at this point, other than - // that it has crl-signing usage. - if err := issuerIDEntryMap[issuerId].EnsureUsage(CRLSigningUsage); err != nil { - continue - } - - // Prefer to use the default as the representative of this - // set, if it is a member. - // - // If it is, we'll also pull in the unassigned certs to remain - // compatible with Vault's earlier, potentially questionable - // behavior. if issuerId == config.DefaultIssuerId { if len(unassignedCerts) > 0 { revokedCerts = append(revokedCerts, unassignedCerts...) @@ -876,18 +382,10 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { representative = issuerId } - // Otherwise, use any other random issuer if we've not yet - // chosen one. - if representative == issuerID("") { - representative = issuerId - } - - // Pull in the revoked certs associated with this member. 
if thisRevoked, ok := revokedCertsMap[issuerId]; ok && len(thisRevoked) > 0 { revokedCerts = append(revokedCerts, thisRevoked...) } - // Finally, check our crlIdentifier. if thisCRLId, ok := crlConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { if len(crlIdentifier) > 0 && crlIdentifier != thisCRLId { return fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) @@ -898,13 +396,6 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } } - if representative == "" { - // Skip this set for the time being; while we have valid - // issuers and associated keys, this occurred because we lack - // crl-signing usage on all issuers in this set. - continue - } - if len(crlIdentifier) == 0 { // Create a new random UUID for this CRL if none exists. crlIdentifier = genCRLId() @@ -921,38 +412,10 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { crlNumber := crlConfig.CRLNumberMap[crlIdentifier] crlConfig.CRLNumberMap[crlIdentifier] += 1 - // CRLs (regardless of complete vs delta) are incrementally - // numbered. But delta CRLs need to know the number of the - // last complete CRL. We assume that's the previous identifier - // if no value presently exists. - lastCompleteNumber, haveLast := crlConfig.LastCompleteNumberMap[crlIdentifier] - if !haveLast { - // We use the value of crlNumber for the current CRL, so - // decrement it by one to find the last one. - lastCompleteNumber = crlNumber - 1 - } - - // Update `LastModified` - if isDelta { - crlConfig.DeltaLastModified = time.Now().UTC() - } else { - crlConfig.LastModified = time.Now().UTC() - } - // Lastly, build the CRL. 
- nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isDelta, lastCompleteNumber) - if err != nil { + if err := buildCRL(ctx, b, req, forceNew, representative, revokedCerts, crlIdentifier, crlNumber); err != nil { return fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %v", representative, err) } - - crlConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate - if !isDelta { - crlConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber - } else if !haveLast { - // Since we're writing this config anyways, save our guess - // as to the last CRL number. - crlConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber - } } } @@ -990,7 +453,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } if !stillHaveIssuerForID { - if err := sc.Storage.Delete(sc.Context, "crls/"+crlId.String()); err != nil { + if err := req.Storage.Delete(ctx, "crls/"+crlId.String()); err != nil { return fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %v", err) } } @@ -999,104 +462,30 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Finally, persist our potentially updated local CRL config. Only do this // if we didn't have a legacy CRL bundle. if !wasLegacy { - if err := sc.setLocalCRLConfig(crlConfig); err != nil { + if err := setLocalCRLConfig(ctx, req.Storage, crlConfig); err != nil { return fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %v", err) } } - if !isDelta { - // After we've confirmed the primary CRLs have built OK, go ahead and - // clear the delta CRL WAL and rebuild it. 
- if err := sc.Backend.crlBuilder.clearDeltaWAL(sc, currDeltaCerts); err != nil { - return fmt.Errorf("error building CRLs: unable to clear Delta WAL: %v", err) - } - if err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew); err != nil { - return fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %v", err) - } - } else { - // Update our last build time here so we avoid checking for new certs - // for a while. - sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() - - if len(lastDeltaSerial) > 0 { - // When we have a last delta serial, write out the relevant info - // so we can skip extra CRL rebuilds. - deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} - - lastDeltaBuildEntry, err := logical.StorageEntryJSON(deltaWALLastBuildSerial, deltaInfo) - if err != nil { - return fmt.Errorf("error creating last delta CRL rebuild serial entry: %v", err) - } - - err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) - if err != nil { - return fmt.Errorf("error persisting last delta CRL rebuild info: %v", err) - } - } - } - // All good :-) return nil } -func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID]*x509.Certificate) bool { - if len(revInfo.CertificateIssuer) > 0 { - issuerId := revInfo.CertificateIssuer - if _, issuerExists := issuerIDCertMap[issuerId]; issuerExists { - return true - } - } - - return false -} - -func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509.Certificate, issuerIDCertMap map[issuerID]*x509.Certificate) bool { - for issuerId, issuerCert := range issuerIDCertMap { - if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) { - if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil { - // Valid mapping. Add it to the specified entry. 
- revInfo.CertificateIssuer = issuerId - return true - } - } - } - - return false -} - -func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { +func getRevokedCertEntries(ctx context.Context, req *logical.Request, issuerIDCertMap map[issuerID]*x509.Certificate) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { var unassignedCerts []pkix.RevokedCertificate revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) - listingPath := revokedPath - if isDelta { - listingPath = deltaWALPath - } - - revokedSerials, err := sc.Storage.List(sc.Context, listingPath) + revokedSerials, err := req.Storage.List(ctx, revokedPath) if err != nil { return nil, nil, errutil.InternalError{Err: fmt.Sprintf("error fetching list of revoked certs: %s", err)} } - // Build a mapping of issuer serial -> certificate. - issuerSerialCertMap := make(map[string][]*x509.Certificate, len(issuerIDCertMap)) - for _, cert := range issuerIDCertMap { - serialStr := serialFromCert(cert) - issuerSerialCertMap[serialStr] = append(issuerSerialCertMap[serialStr], cert) - } - for _, serial := range revokedSerials { - if isDelta && (serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName) { - // Skip our placeholder entries... - continue - } - var revInfo revocationInfo - revokedEntry, err := sc.Storage.Get(sc.Context, revokedPath+serial) + revokedEntry, err := req.Storage.Get(ctx, revokedPath+serial) if err != nil { return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch revoked cert with serial %s: %s", serial, err)} } - if revokedEntry == nil { return nil, nil, errutil.InternalError{Err: fmt.Sprintf("revoked certificate entry for serial %s is nil", serial)} } @@ -1104,7 +493,7 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 // TODO: In this case, remove it and continue? 
How likely is this to // happen? Alternately, could skip it entirely, or could implement a // delete function so that there is a way to remove these - return nil, nil, errutil.InternalError{Err: "found revoked serial but actual certificate is empty"} + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("found revoked serial but actual certificate is empty")} } err = revokedEntry.DecodeJSON(&revInfo) @@ -1117,34 +506,6 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse stored revoked certificate with serial %s: %s", serial, err)} } - // We want to skip issuer certificate's revocationEntries for two - // reasons: - // - // 1. We canonically use augmentWithRevokedIssuers to handle this - // case and this entry is just a backup. This prevents the issue - // of duplicate serial numbers on the CRL from both paths. - // 2. We want to avoid a root's serial from appearing on its own - // CRL. If it is a cross-signed or re-issued variant, this is OK, - // but in the case we mark the root itself as "revoked", we want - // to avoid it appearing on the CRL as that is definitely - // undefined/little-supported behavior. - // - // This hash map lookup should be faster than byte comparison against - // each issuer proactively. - if candidates, present := issuerSerialCertMap[serialFromCert(revokedCert)]; present { - revokedCertIsIssuer := false - for _, candidate := range candidates { - if bytes.Equal(candidate.Raw, revokedCert.Raw) { - revokedCertIsIssuer = true - break - } - } - - if revokedCertIsIssuer { - continue - } - } - // NOTE: We have to change this to UTC time because the CRL standard // mandates it but Go will happily encode the CRL without this. 
newRevCert := pkix.RevokedCertificate{ @@ -1160,21 +521,34 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 // prefer it to manually checking each issuer signature, assuming it // appears valid. It's highly unlikely for two different issuers // to have the same id (after the first was deleted). - if isRevInfoIssuerValid(&revInfo, issuerIDCertMap) { - revokedCertsMap[revInfo.CertificateIssuer] = append(revokedCertsMap[revInfo.CertificateIssuer], newRevCert) - continue + if len(revInfo.CertificateIssuer) > 0 { + issuerId := revInfo.CertificateIssuer + if _, issuerExists := issuerIDCertMap[issuerId]; issuerExists { + revokedCertsMap[issuerId] = append(revokedCertsMap[issuerId], newRevCert) + continue + } // Otherwise, fall through and update the entry. } // Now we need to assign the revoked certificate to an issuer. - foundParent := associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) + foundParent := false + for issuerId, issuerCert := range issuerIDCertMap { + if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) { + if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil { + // Valid mapping. Add it to the specified entry. + revokedCertsMap[issuerId] = append(revokedCertsMap[issuerId], newRevCert) + revInfo.CertificateIssuer = issuerId + foundParent = true + break + } + } + } + if !foundParent { // If the parent isn't found, add it to the unassigned bucket. unassignedCerts = append(unassignedCerts, newRevCert) } else { - revokedCertsMap[revInfo.CertificateIssuer] = append(revokedCertsMap[revInfo.CertificateIssuer], newRevCert) - // When the CertificateIssuer field wasn't found on the existing // entry (or was invalid), and we've found a new value for it, // we should update the entry to make future CRL builds faster. 
@@ -1183,7 +557,7 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v", serial) } - err = sc.Storage.Put(sc.Context, revokedEntry) + err = req.Storage.Put(ctx, revokedEntry) if err != nil { return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v", serial) } @@ -1193,55 +567,28 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 return unassignedCerts, revokedCertsMap, nil } -func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issuerIDCertMap map[issuerID]*x509.Certificate, revokedCertsMap map[issuerID][]pkix.RevokedCertificate) error { - // When setup our maps with the legacy CA bundle, we only have a - // single entry here. This entry is never revoked, so the outer loop - // will exit quickly. - for ourIssuerID, ourIssuer := range issuerIDEntryMap { - if !ourIssuer.Revoked { - continue - } - - ourCert := issuerIDCertMap[ourIssuerID] - ourRevCert := pkix.RevokedCertificate{ - SerialNumber: ourCert.SerialNumber, - RevocationTime: ourIssuer.RevocationTimeUTC, - } - - for otherIssuerID := range issuerIDEntryMap { - if otherIssuerID == ourIssuerID { - continue - } - - // Find all _other_ certificates which verify this issuer, - // allowing us to add this revoked issuer to this issuer's - // CRL. - otherCert := issuerIDCertMap[otherIssuerID] - if err := ourCert.CheckSignatureFrom(otherCert); err == nil { - // Valid signature; add our result. - revokedCertsMap[otherIssuerID] = append(revokedCertsMap[otherIssuerID], ourRevCert) - } - } - } - - return nil -} - // Builds a CRL by going through the list of revoked certificates and building // a new CRL with the stored revocation times and serial numbers. 
-func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { +func buildCRL(ctx context.Context, b *backend, req *logical.Request, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64) error { + crlInfo, err := b.CRL(ctx, req.Storage) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error fetching CRL config information: %s", err)} + } + + crlLifetime := b.crlLifetime var revokedCerts []pkix.RevokedCertificate - crlLifetime, err := time.ParseDuration(crlInfo.Expiry) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error parsing CRL duration of %s", crlInfo.Expiry)} + if crlInfo.Expiry != "" { + crlDur, err := time.ParseDuration(crlInfo.Expiry) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error parsing CRL duration of %s", crlInfo.Expiry)} + } + crlLifetime = crlDur } if crlInfo.Disable { if !forceNew { - // In the event of a disabled CRL, we'll have the next time set - // to the zero time as a sentinel in case we get re-enabled. 
- return &time.Time{}, nil + return nil } // NOTE: in this case, the passed argument (revoked) is not added @@ -1256,36 +603,26 @@ func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerI revokedCerts = revoked WRITE: - signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, CRLSigningUsage) + signingBundle, caErr := fetchCAInfoByIssuerId(ctx, b, req, thisIssuerId, CRLSigningUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: - return nil, errutil.UserError{Err: fmt.Sprintf("could not fetch the CA certificate: %s", caErr)} + return errutil.UserError{Err: fmt.Sprintf("could not fetch the CA certificate: %s", caErr)} default: - return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA certificate: %s", caErr)} + return errutil.InternalError{Err: fmt.Sprintf("error fetching CA certificate: %s", caErr)} } } - now := time.Now() - nextUpdate := now.Add(crlLifetime) - - ext, err := certutil.CreateDeltaCRLIndicatorExt(lastCompleteNumber) - if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) - } - revocationListTemplate := &x509.RevocationList{ RevokedCertificates: revokedCerts, Number: big.NewInt(crlNumber), - ThisUpdate: now, - NextUpdate: nextUpdate, - SignatureAlgorithm: signingBundle.RevocationSigAlg, - ExtraExtensions: []pkix.Extension{ext}, + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(crlLifetime), } crlBytes, err := x509.CreateRevocationList(rand.Reader, revocationListTemplate, signingBundle.Certificate, signingBundle.PrivateKey) if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error creating new CRL: %s", err)} + return errutil.InternalError{Err: fmt.Sprintf("error creating new CRL: %s", err)} } writePath := "crls/" + identifier.String() @@ -1293,18 +630,15 @@ WRITE: // Ignore the CRL ID as it won't be persisted anyways; hard-code the // old legacy path and allow it to be updated. 
writePath = legacyCRLPath - } else if isDelta { - // Write the delta CRL to a unique storage location. - writePath += deltaCRLPathSuffix } - err = sc.Storage.Put(sc.Context, &logical.StorageEntry{ + err = req.Storage.Put(ctx, &logical.StorageEntry{ Key: writePath, Value: crlBytes, }) if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error storing CRL: %s", err)} + return errutil.InternalError{Err: fmt.Sprintf("error storing CRL: %s", err)} } - return &nextUpdate, nil + return nil } diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index 497ea8893a996..22100f3888923 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -1,10 +1,6 @@ package pki -import ( - "time" - - "github.com/hashicorp/vault/sdk/framework" -) +import "github.com/hashicorp/vault/sdk/framework" const ( issuerRefParam = "issuer_ref" @@ -145,13 +141,6 @@ be larger than the role max TTL.`, The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, } - fields["remove_roots_from_chain"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to remove self-signed CA certificates in the output -of the ca_chain field.`, - } - fields = addIssuerRefField(fields) return fields @@ -330,13 +319,6 @@ SHA-2-512. Defaults to 0 to automatically detect based on key length }, } - fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. 
Defaults to false.`, - } - fields["key_type"] = &framework.FieldSchema{ Type: framework.TypeString, Default: "rsa", @@ -427,53 +409,3 @@ to the key.`, } return fields } - -func addTidyFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["tidy_cert_store"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying up -the certificate store`, - } - - fields["tidy_revocation_list"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Deprecated; synonym for 'tidy_revoked_certs`, - } - - fields["tidy_revoked_certs"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to expire all revoked -and expired certificates, removing them both from the CRL and from storage. The -CRL will be rotated if this causes any values to be removed.`, - } - - fields["tidy_revoked_cert_issuer_associations"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to validate issuer associations -on revocation entries. This helps increase the performance of CRL building -and OCSP responses.`, - } - - fields["safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of extra time that must have passed -beyond certificate expiration before it is removed -from the backend storage and/or revocation list. -Defaults to 72 hours.`, - Default: int(defaultTidyConfig.SafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["pause_duration"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The amount of time to wait between processing -certificates. This allows operators to change the execution profile -of tidy to take consume less resources by slowing down how long it -takes to run. 
Note that the entire list of certificates will be -stored in memory during the entire tidy operation, but resources to -read/process/update existing entries will be spread out over a -greater period of time. By default this is zero seconds.`, - Default: "0s", - } - - return fields -} diff --git a/builtin/logical/pki/integation_test.go b/builtin/logical/pki/integation_test.go index ed2b45940baac..4f546b7ed854d 100644 --- a/builtin/logical/pki/integation_test.go +++ b/builtin/logical/pki/integation_test.go @@ -9,7 +9,6 @@ import ( ) func TestIntegration_RotateRootUsesNext(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -76,7 +75,6 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { } func TestIntegration_ReplaceRootNormal(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) // generate roots @@ -114,7 +112,6 @@ func TestIntegration_ReplaceRootNormal(t *testing.T) { } func TestIntegration_ReplaceRootDefaultsToNext(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) // generate roots @@ -151,7 +148,6 @@ func TestIntegration_ReplaceRootDefaultsToNext(t *testing.T) { } func TestIntegration_ReplaceRootBadIssuer(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) // generate roots @@ -201,7 +197,6 @@ func TestIntegration_ReplaceRootBadIssuer(t *testing.T) { } func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { - t.Parallel() rootBackend, rootStorage := createBackendWithStorage(t) intBackend, intStorage := createBackendWithStorage(t) diff --git a/builtin/logical/pki/key_util.go b/builtin/logical/pki/key_util.go index c40831714625e..8478bbc9987e2 100644 --- a/builtin/logical/pki/key_util.go +++ b/builtin/logical/pki/key_util.go @@ -9,10 +9,11 @@ import ( "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" + 
"github.com/hashicorp/vault/sdk/logical" ) -func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.PublicKey) (bool, error) { - publicKeyForKeyEntry, err := getPublicKey(sc.Context, sc.Backend, key) +func comparePublicKey(ctx context.Context, b *backend, key *keyEntry, publicKey crypto.PublicKey) (bool, error) { + publicKeyForKeyEntry, err := getPublicKey(ctx, b, key) if err != nil { return false, err } @@ -75,7 +76,7 @@ func getPublicKeyFromBytes(keyBytes []byte) (crypto.PublicKey, error) { return signer.Public(), nil } -func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*keyEntry, bool, error) { +func importKeyFromBytes(ctx context.Context, b *backend, s logical.Storage, keyValue string, keyName string) (*keyEntry, bool, error) { signer, _, _, err := getSignerFromBytes([]byte(keyValue)) if err != nil { return nil, false, err @@ -85,7 +86,7 @@ func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*k return nil, false, errors.New("unsupported private key type within pem bundle") } - key, existed, err := sc.importKey(keyValue, keyName, privateKeyType) + key, existed, err := importKey(ctx, b, s, keyValue, keyName, privateKeyType) if err != nil { return nil, false, err } diff --git a/builtin/logical/pki/ocsp.go b/builtin/logical/pki/ocsp.go deleted file mode 100644 index a3711c01da320..0000000000000 --- a/builtin/logical/pki/ocsp.go +++ /dev/null @@ -1,416 +0,0 @@ -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "errors" - "fmt" - "io" - "math/big" - "net/http" - "time" - - "github.com/hashicorp/vault/sdk/helper/errutil" - - "golang.org/x/crypto/ocsp" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - ocspReqParam = "req" - ocspResponseContentType = "application/ocsp-response" - maximumRequestSize = 2048 // A 
normal simple request is 87 bytes, so give us some buffer -) - -type ocspRespInfo struct { - serialNumber *big.Int - ocspStatus int - revocationTimeUTC *time.Time - issuerID issuerID -} - -// These response variables should not be mutated, instead treat them as constants -var ( - OcspUnauthorizedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusUnauthorized, - logical.HTTPRawBody: ocsp.UnauthorizedErrorResponse, - }, - } - OcspMalformedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusBadRequest, - logical.HTTPRawBody: ocsp.MalformedRequestErrorResponse, - }, - } - OcspInternalErrorResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusInternalServerError, - logical.HTTPRawBody: ocsp.InternalErrorErrorResponse, - }, - } - - ErrMissingOcspUsage = errors.New("issuer entry did not have the OCSPSigning usage") - ErrIssuerHasNoKey = errors.New("issuer has no key") - ErrUnknownIssuer = errors.New("unknown issuer") -) - -func buildPathOcspGet(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "ocsp/" + framework.MatchAllRegex(ocspReqParam), - Fields: map[string]*framework.FieldSchema{ - ocspReqParam: { - Type: framework.TypeString, - Description: "base-64 encoded ocsp request", - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, - - HelpSynopsis: pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func buildPathOcspPost(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "ocsp", - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, 
- - HelpSynopsis: pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, request.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil || cfg.OcspDisable { - return OcspUnauthorizedResponse, nil - } - - derReq, err := fetchDerEncodedRequest(request, data) - if err != nil { - return OcspMalformedResponse, nil - } - - ocspReq, err := ocsp.ParseRequest(derReq) - if err != nil { - return OcspMalformedResponse, nil - } - - ocspStatus, err := getOcspStatus(sc, request, ocspReq) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - caBundle, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) - if err != nil { - if errors.Is(err, ErrUnknownIssuer) { - // Since we were not able to find a matching issuer for the incoming request - // generate an Unknown OCSP response. This might turn into an Unauthorized if - // we find out that we don't have a default issuer or it's missing the proper Usage flags - return generateUnknownResponse(cfg, sc, ocspReq), nil - } - if errors.Is(err, ErrMissingOcspUsage) { - // If we did find a matching issuer but aren't allowed to sign, the spec says - // we should be responding with an Unauthorized response as we don't have the - // ability to sign the response. 
- // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 - return OcspUnauthorizedResponse, nil - } - return logAndReturnInternalError(b, err), nil - } - - byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - }, nil -} - -func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { - // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did - // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there - // isn't much else we can do at this point. - config, err := sc.getIssuersConfig() - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - if config.DefaultIssuerId == "" { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - - caBundle, issuer, err := getOcspIssuerParsedBundle(sc, config.DefaultIssuerId) - if err != nil { - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // We must have raced on a delete/update of the default issuer, anyways - // no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - return logAndReturnInternalError(sc.Backend, err) - } - - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. 
- return OcspUnauthorizedResponse - } - - info := &ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Unknown, - } - - byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm) - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - } -} - -func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) ([]byte, error) { - switch request.Operation { - case logical.ReadOperation: - // The param within the GET request should have a base64 encoded version of a DER request. - base64Req := data.Get(ocspReqParam).(string) - if base64Req == "" { - return nil, errors.New("no base64 encoded ocsp request was found") - } - - if len(base64Req) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - - return base64.StdEncoding.DecodeString(base64Req) - case logical.UpdateOperation: - // POST bodies should contain the binary form of the DER request. - rawBody := request.HTTPRequest.Body - defer rawBody.Close() - - requestBytes, err := io.ReadAll(io.LimitReader(rawBody, maximumRequestSize)) - if err != nil { - return nil, err - } - - if len(requestBytes) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - return requestBytes, nil - default: - return nil, fmt.Errorf("unsupported request method: %s", request.HTTPRequest.Method) - } -} - -func logAndReturnInternalError(b *backend, err error) *logical.Response { - // Since OCSP might be a high traffic endpoint, we will log at debug level only - // any internal errors we do get. There is no way for us to return to the end-user - // errors, so we rely on the log statement to help in debugging possible - // issues in the field. 
- b.Logger().Debug("OCSP internal error", "error", err) - return OcspInternalErrorResponse -} - -func getOcspStatus(sc *storageContext, request *logical.Request, ocspReq *ocsp.Request) (*ocspRespInfo, error) { - revEntryRaw, err := fetchCertBySerialBigInt(sc.Context, sc.Backend, request, revokedPath, ocspReq.SerialNumber) - if err != nil { - return nil, err - } - - info := ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Good, - } - - if revEntryRaw != nil { - var revEntry revocationInfo - if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { - return nil, err - } - - info.ocspStatus = ocsp.Revoked - info.revocationTimeUTC = &revEntry.RevocationTimeUTC - info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt - } - - return &info, nil -} - -func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuerID) (*certutil.ParsedCertBundle, error) { - reqHash := req.HashAlgorithm - if !reqHash.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - - // This will prime up issuerIds, with either the optRevokedIssuer value if set - // or if we are operating in legacy storage mode, the shim bundle id or finally - // a list of all our issuers in this mount. - issuerIds, err := lookupIssuerIds(sc, optRevokedIssuer) - if err != nil { - return nil, err - } - - matchedButNoUsage := false - for _, issuerId := range issuerIds { - parsedBundle, issuer, err := getOcspIssuerParsedBundle(sc, issuerId) - if err != nil { - // A bit touchy here as if we get an ErrUnknownIssuer for an issuer id that we picked up - // from a revocation entry, we still return an ErrUnknownOcspIssuer as we can't validate - // the end-user actually meant this specific issuer's cert with serial X. - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // This skips either bad issuer ids, or root certs with no keys that we can't use. 
- continue - } - return nil, err - } - - // Make sure the client and Vault are talking about the same issuer, otherwise - // we might have a case of a matching serial number for a different issuer which - // we should not respond back in the affirmative about. - matches, err := doesRequestMatchIssuer(parsedBundle, req) - if err != nil { - return nil, err - } - - if matches { - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - matchedButNoUsage = true - // We found a matching issuer, but it's not allowed to sign the - // response, there might be another issuer that we rotated - // that will match though, so keep iterating. - continue - } - - return parsedBundle, nil - } - } - - if matchedButNoUsage { - // We matched an issuer but it did not have an OCSP signing usage set so bail. - return nil, ErrMissingOcspUsage - } - - return nil, ErrUnknownIssuer -} - -func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { - issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) - if err != nil { - switch err.(type) { - case errutil.UserError: - // Most likely the issuer id no longer exists skip it - return nil, nil, ErrUnknownIssuer - default: - return nil, nil, err - } - } - - if issuer.KeyID == "" { - // No point if the key does not exist from the issuer to use as a signer. 
- return nil, nil, ErrIssuerHasNoKey - } - - caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) - if err != nil { - return nil, nil, err - } - - return caBundle, issuer, nil -} - -func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuerID) ([]issuerID, error) { - if optRevokedIssuer != "" { - return []issuerID{optRevokedIssuer}, nil - } - - if sc.Backend.useLegacyBundleCaStorage() { - return []issuerID{legacyBundleShimID}, nil - } - - return sc.listIssuers() -} - -func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.Request) (bool, error) { - var pkInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(parsedBundle.Certificate.RawSubjectPublicKeyInfo, &pkInfo); err != nil { - return false, err - } - - h := req.HashAlgorithm.New() - h.Write(pkInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(parsedBundle.Certificate.RawSubject) - issuerNameHash := h.Sum(nil) - - return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil -} - -func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash) ([]byte, error) { - curTime := time.Now() - duration, err := time.ParseDuration(cfg.OcspExpiry) - if err != nil { - return nil, err - } - template := ocsp.Response{ - IssuerHash: reqHash, - Status: info.ocspStatus, - SerialNumber: info.serialNumber, - ThisUpdate: curTime, - NextUpdate: curTime.Add(duration), - Certificate: caBundle.Certificate, - ExtraExtensions: []pkix.Extension{}, - } - - if info.ocspStatus == ocsp.Revoked { - template.RevokedAt = *info.revocationTimeUTC - template.RevocationReason = ocsp.Unspecified - } - - return ocsp.CreateResponse(caBundle.Certificate, caBundle.Certificate, template, caBundle.PrivateKey) -} - -const pathOcspHelpSyn = ` -Query a certificate's revocation status through OCSP' -` - -const pathOcspHelpDesc = ` -This 
endpoint expects DER encoded OCSP requests and returns DER encoded OCSP responses -` diff --git a/builtin/logical/pki/ocsp_test.go b/builtin/logical/pki/ocsp_test.go deleted file mode 100644 index edae1eb490ba6..0000000000000 --- a/builtin/logical/pki/ocsp_test.go +++ /dev/null @@ -1,620 +0,0 @@ -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ocsp" -) - -// If the ocsp_disabled flag is set to true in the crl configuration make sure we always -// return an Unauthorized error back as we assume an end-user disabling the feature does -// not want us to act as the OCSP authority and the RFC specifies this is the appropriate response. -func TestOcsp_Disabled(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - tests = append(tests, testArgs{ - reqType: reqType, - }) - } - for _, tt := range tests { - localTT := tt - t.Run(localTT.reqType, func(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "rsa") - resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ - "ocsp_disable": "true", - }) - requireSuccessNilResponse(t, resp, err) - resp, err = sendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) - }) - } -} - -// If we can't find the issuer within the request and have no 
default issuer to sign an Unknown response -// with return an UnauthorizedErrorResponse/according to/the RFC, similar to if we are disabled (lack of authority) -// This behavior differs from CRLs when an issuer is removed from a mount. -func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) { - t.Parallel() - - _, _, testEnv := setupOcspEnv(t, "ec") - // Create another completely empty mount so the created issuer/certificate above is unknown - b, s := createBackendWithStorage(t) - - resp, err := sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// If the issuer in the request does exist, but the request coming in associates the serial with the -// wrong issuer return an Unknown response back to the caller. 
-func TestOcsp_WrongIssuerInRequest(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Verify that requests we can't properly decode result in the correct response of MalformedRequestError -func TestOcsp_MalformedRequests(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - tests = append(tests, testArgs{ - reqType: reqType, - }) - } - for _, tt := range tests { - localTT := tt - t.Run(localTT.reqType, func(t *testing.T) { - b, s, _ := setupOcspEnv(t, "rsa") - badReq := []byte("this is a bad request") - var resp *logical.Response - var err error - switch localTT.reqType { - case "get": - resp, err = sendOcspGetRequest(b, s, badReq) - case "post": - resp, err = sendOcspPostRequest(b, s, badReq) - default: - t.Fatalf("bad request type") - } - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 400, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, 
ocsp.MalformedRequestErrorResponse, respDer) - }) - } -} - -// Validate that we properly handle a revocation entry that contains an issuer ID that no longer exists, -// the best we can do in this use case is to respond back with the default issuer that we don't know -// the issuer that they are requesting (we can't guarantee that the client is actually requesting a serial -// from that issuer) -func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. - storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Send the request - resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, 
"parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Validate that we properly handle an unknown issuer use-case but that the default issuer -// does not have the OCSP usage flag set, we can't do much else other than reply with an -// Unauthorized response. -func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. - storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Update our issuers to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer1") - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId2.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") - - // Send the request - resp, err = sendOcspRequest(t, b, 
s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify that if we do have a revoked certificate entry for the request, that matches an -// issuer but that issuer does not have the OcspUsage flag set that we return an Unauthorized -// response back to the caller -func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Update our issuer to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - - // Do not assume a specific ordering for usage... 
- usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if our matching issuer for a revocation entry has no key associated with it that -// we bail with an Unauthorized response. -func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Delete the key associated with our issuer - resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) - requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") - requireFieldsSetInResp(t, resp, "key_id") - keyId := resp.Data["key_id"].(keyID) - - // This is a bit naughty but allow me to delete the key... 
- sc := b.makeStorageContext(context.Background(), s) - issuer, err := sc.fetchIssuerById(testEnv.issuerId1) - require.NoError(t, err, "failed to get issuer from storage") - issuer.KeyID = "" - err = sc.writeIssuer(issuer) - require.NoError(t, err, "failed to write issuer update") - - resp, err = CBDelete(b, s, "key/"+keyId.String()) - requireSuccessNonNilResponse(t, resp, err, "failed deleting key") - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if for some reason an end-user has rotated an existing certificate using the same -// key so our algo matches multiple issuers and one has OCSP usage disabled. We expect that -// even if a prior issuer issued the certificate, the new matching issuer can respond and sign -// the response to the caller on its behalf. -// -// NOTE: This test is a bit at the mercy of iteration order of the issuer ids. -// -// If it becomes flaky, most likely something is wrong in the code -// and not the test. 
-func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Create a matching issuer as issuer1 with the same backing key - resp, err := CBWrite(b, s, "root/rotate/existing", map[string]interface{}{ - "key_ref": testEnv.keyId1, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "rotate issuer failed") - requireFieldsSetInResp(t, resp, "issuer_id") - rotatedCert := parseCert(t, resp.Data["certificate"].(string)) - - // Remove ocsp signing from our issuer - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - // Do not assume a specific ordering for usage... - usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get a Good response back, from the rotated cert - resp, err = sendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) - require.Equal(t, 0, 
ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - require.Equal(t, rotatedCert, ocspResp.Certificate) - - requireOcspSignatureAlgoForKey(t, rotatedCert.PublicKey, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, rotatedCert.PublicKey) -} - -func TestOcsp_ValidRequests(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - caKeyType string - reqHash crypto.Hash - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - for _, caKeyType := range []string{"rsa", "ec"} { // "ed25519" is not supported at the moment in x/crypto/ocsp - for _, requestHash := range []crypto.Hash{crypto.SHA1, crypto.SHA256} { - tests = append(tests, testArgs{ - reqType: reqType, - caKeyType: caKeyType, - reqHash: requestHash, - }) - } - } - } - for _, tt := range tests { - localTT := tt - testName := fmt.Sprintf("%s-%s-%s", localTT.reqType, localTT.caKeyType, localTT.reqHash) - t.Run(testName, func(t *testing.T) { - runOcspRequestTest(t, localTT.reqType, localTT.caKeyType, localTT.reqHash) - }) - } -} - -func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, requestHash crypto.Hash) { - b, s, testEnv := setupOcspEnv(t, caKeyType) - - // Non-revoked cert - resp, err := sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, 
ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.PublicKey, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1.PublicKey) - - // Now revoke it - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request with revoked") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response with revoked") - - require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.PublicKey, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1.PublicKey) - - // Request status for our second issuer - resp, err = sendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, 
resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer2) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer2, ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) - - // Verify that our thisUpdate and nextUpdate fields are updated as expected - thisUpdate := ocspResp.ThisUpdate - nextUpdate := ocspResp.NextUpdate - require.True(t, thisUpdate.Before(nextUpdate), - fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) - nextUpdateDiff := nextUpdate.Sub(thisUpdate) - expectedDiff, err := time.ParseDuration(defaultCrlConfig.OcspExpiry) - require.NoError(t, err, "failed to parse default ocsp expiry value") - require.Equal(t, expectedDiff, nextUpdateDiff, - fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", - thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer2.PublicKey, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2.PublicKey) -} - -func requireOcspSignatureAlgoForKey(t *testing.T, key crypto.PublicKey, algorithm x509.SignatureAlgorithm) { - switch key.(type) { - case *rsa.PublicKey: - require.Equal(t, x509.SHA256WithRSA, algorithm) - case *ecdsa.PublicKey: - require.Equal(t, x509.ECDSAWithSHA256, algorithm) - case ed25519.PublicKey: - require.Equal(t, x509.PureEd25519, algorithm) - default: - t.Fatalf("unsupported public key type %T", key) - } -} - -type ocspTestEnv struct { - issuer1 *x509.Certificate - issuer2 *x509.Certificate - - issuerId1 issuerID - issuerId2 issuerID - - leafCertIssuer1 *x509.Certificate - leafCertIssuer2 *x509.Certificate 
- - keyId1 keyID - keyId2 keyID -} - -func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { - b, s := createBackendWithStorage(t) - var issuerCerts []*x509.Certificate - var leafCerts []*x509.Certificate - var issuerIds []issuerID - var keyIds []keyID - - for i := 0; i < 2; i++ { - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "key_type": keyType, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") - requireFieldsSetInResp(t, resp, "issuer_id", "key_id") - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) - - resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "allow_bare_domains": true, - "allow_subdomains": true, - "allowed_domains": "foobar.com", - "no_store": false, - "generate_lease": false, - "issuer_ref": issuerId, - "key_type": keyType, - }) - requireSuccessNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - - resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "common_name": "test.foobar.com", - }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - requireFieldsSetInResp(t, resp, "certificate", "issuing_ca", "serial_number") - leafCert := parseCert(t, resp.Data["certificate"].(string)) - issuingCa := parseCert(t, resp.Data["issuing_ca"].(string)) - - issuerCerts = append(issuerCerts, issuingCa) - leafCerts = append(leafCerts, leafCert) - issuerIds = append(issuerIds, issuerId) - keyIds = append(keyIds, keyId) - } - - testEnv := &ocspTestEnv{ - issuerId1: issuerIds[0], - issuer1: issuerCerts[0], - leafCertIssuer1: leafCerts[0], - keyId1: keyIds[0], - - issuerId2: issuerIds[1], - issuer2: issuerCerts[1], - leafCertIssuer2: leafCerts[1], - keyId2: keyIds[1], - } - - return b, s, testEnv -} - -func sendOcspRequest(t *testing.T, 
b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { - ocspRequest := generateRequest(t, requestHash, cert, issuer) - - switch strings.ToLower(getOrPost) { - case "get": - return sendOcspGetRequest(b, s, ocspRequest) - case "post": - return sendOcspPostRequest(b, s, ocspRequest) - default: - t.Fatalf("unsupported value for sendOcspRequest getOrPost arg: %s", getOrPost) - } - return nil, nil -} - -func sendOcspGetRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - urlEncoded := base64.StdEncoding.EncodeToString(ocspRequest) - return CBRead(b, s, "ocsp/"+urlEncoded) -} - -func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - reader := io.NopCloser(bytes.NewReader(ocspRequest)) - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "ocsp", - Storage: s, - MountPoint: "pki/", - HTTPRequest: &http.Request{ - Body: reader, - }, - }) - - return resp, err -} - -func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { - opts := &ocsp.RequestOptions{Hash: requestHash} - ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) - require.NoError(t, err, "Failed generating OCSP request") - return ocspRequestDer -} - -func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, key crypto.PublicKey) { - require.Contains(t, []x509.SignatureAlgorithm{x509.SHA256WithRSA, x509.ECDSAWithSHA256}, ocspResp.SignatureAlgorithm) - - hasher := sha256.New() - hashAlgo := crypto.SHA256 - hasher.Write(ocspResp.TBSResponseData) - hashData := hasher.Sum(nil) - - switch typedKey := key.(type) { - case *rsa.PublicKey: - err := rsa.VerifyPKCS1v15(typedKey, hashAlgo, hashData, ocspResp.Signature) - require.NoError(t, err, "the ocsp response was not signed by the expected public rsa key.") - 
case *ecdsa.PublicKey: - verify := ecdsa.VerifyASN1(typedKey, hashData, ocspResp.Signature) - require.True(t, verify, "the certificate was not signed by the expected public ecdsa key.") - } -} diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go index dd2010e90684f..2dcc406209ac4 100644 --- a/builtin/logical/pki/path_config_ca.go +++ b/builtin/logical/pki/path_config_ca.go @@ -101,8 +101,7 @@ func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ return logical.ErrorResponse("Cannot read defaults until migration has completed"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getIssuersConfig() + config, err := getIssuersConfig(ctx, req.Storage) if err != nil { return logical.ErrorResponse("Error loading issuers configuration: " + err.Error()), nil } @@ -129,8 +128,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, return logical.ErrorResponse("Invalid issuer specification; must be non-empty and can't be 'default'."), nil } - sc := b.makeStorageContext(ctx, req.Storage) - parsedIssuer, err := sc.resolveIssuerReference(newDefault) + parsedIssuer, err := resolveIssuerReference(ctx, req.Storage, newDefault) if err != nil { return logical.ErrorResponse("Error resolving issuer reference: " + err.Error()), nil } @@ -141,7 +139,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, }, } - entry, err := sc.fetchIssuerById(parsedIssuer) + entry, err := fetchIssuerById(ctx, req.Storage, parsedIssuer) if err != nil { return logical.ErrorResponse("Unable to fetch issuer: " + err.Error()), nil } @@ -152,7 +150,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, b.Logger().Error(msg) } - err = sc.updateDefaultIssuerId(parsedIssuer) + err = updateDefaultIssuerId(ctx, req.Storage, parsedIssuer) if err != nil { return logical.ErrorResponse("Error updating issuer configuration: " + err.Error()), nil 
} @@ -206,8 +204,7 @@ func (b *backend) pathKeyDefaultRead(ctx context.Context, req *logical.Request, return logical.ErrorResponse("Cannot read key defaults until migration has completed"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getKeysConfig() + config, err := getKeysConfig(ctx, req.Storage) if err != nil { return logical.ErrorResponse("Error loading keys configuration: " + err.Error()), nil } @@ -234,13 +231,12 @@ func (b *backend) pathKeyDefaultWrite(ctx context.Context, req *logical.Request, return logical.ErrorResponse("Invalid key specification; must be non-empty and can't be 'default'."), nil } - sc := b.makeStorageContext(ctx, req.Storage) - parsedKey, err := sc.resolveKeyReference(newDefault) + parsedKey, err := resolveKeyReference(ctx, req.Storage, newDefault) if err != nil { return logical.ErrorResponse("Error resolving issuer reference: " + err.Error()), nil } - err = sc.updateDefaultKeyId(parsedKey) + err = updateDefaultKeyId(ctx, req.Storage, parsedKey) if err != nil { return logical.ErrorResponse("Error updating issuer configuration: " + err.Error()), nil } diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go index 30b8047b53fc6..5692e4eb5bbc6 100644 --- a/builtin/logical/pki/path_config_crl.go +++ b/builtin/logical/pki/path_config_crl.go @@ -10,32 +10,10 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const latestCrlConfigVersion = 1 - // CRLConfig holds basic CRL configuration information type crlConfig struct { - Version int `json:"version"` - Expiry string `json:"expiry"` - Disable bool `json:"disable"` - OcspDisable bool `json:"ocsp_disable"` - AutoRebuild bool `json:"auto_rebuild"` - AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` - OcspExpiry string `json:"ocsp_expiry"` - EnableDelta bool `json:"enable_delta"` - DeltaRebuildInterval string `json:"delta_rebuild_interval"` -} - -// Implicit default values for the config if it does not exist. 
-var defaultCrlConfig = crlConfig{ - Version: latestCrlConfigVersion, - Expiry: "72h", - Disable: false, - OcspDisable: false, - OcspExpiry: "12h", - AutoRebuild: false, - AutoRebuildGracePeriod: "12h", - EnableDelta: false, - DeltaRebuildInterval: "15m", + Expiry string `json:"expiry" mapstructure:"expiry"` + Disable bool `json:"disable"` } func pathConfigCRL(b *backend) *framework.Path { @@ -52,34 +30,6 @@ valid; defaults to 72 hours`, Type: framework.TypeBool, Description: `If set to true, disables generating the CRL entirely.`, }, - "ocsp_disable": { - Type: framework.TypeBool, - Description: `If set to true, ocsp unauthorized responses will be returned.`, - }, - "ocsp_expiry": { - Type: framework.TypeString, - Description: `The amount of time an OCSP response will be valid (controls -the NextUpdate field); defaults to 12 hours`, - Default: "1h", - }, - "auto_rebuild": { - Type: framework.TypeBool, - Description: `If set to true, enables automatic rebuilding of the CRL`, - }, - "auto_rebuild_grace_period": { - Type: framework.TypeString, - Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`, - Default: "12h", - }, - "enable_delta": { - Type: framework.TypeBool, - Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`, - }, - "delta_rebuild_interval": { - Type: framework.TypeString, - Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. 
Defaults to 15m.`, - Default: "15m", - }, }, Operations: map[logical.Operation]framework.OperationHandler{ @@ -99,30 +49,43 @@ the NextUpdate field); defaults to 12 hours`, } } +func (b *backend) CRL(ctx context.Context, s logical.Storage) (*crlConfig, error) { + entry, err := s.Get(ctx, "config/crl") + if err != nil { + return nil, err + } + + var result crlConfig + result.Expiry = b.crlLifetime.String() + result.Disable = false + + if entry == nil { + return &result, nil + } + + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + config, err := b.CRL(ctx, req.Storage) if err != nil { return nil, err } return &logical.Response{ Data: map[string]interface{}{ - "expiry": config.Expiry, - "disable": config.Disable, - "ocsp_disable": config.OcspDisable, - "ocsp_expiry": config.OcspExpiry, - "auto_rebuild": config.AutoRebuild, - "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, - "enable_delta": config.EnableDelta, - "delta_rebuild_interval": config.DeltaRebuildInterval, + "expiry": config.Expiry, + "disable": config.Disable, }, }, nil } func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + config, err := b.CRL(ctx, req.Storage) if err != nil { return nil, err } @@ -136,71 +99,12 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra config.Expiry = expiry } - oldDisable := config.Disable + var oldDisable bool if disableRaw, ok := d.GetOk("disable"); ok { + oldDisable = config.Disable config.Disable = disableRaw.(bool) } - if ocspDisableRaw, ok := d.GetOk("ocsp_disable"); ok { - config.OcspDisable = 
ocspDisableRaw.(bool) - } - - if expiryRaw, ok := d.GetOk("ocsp_expiry"); ok { - expiry := expiryRaw.(string) - duration, err := time.ParseDuration(expiry) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("given ocsp_expiry could not be decoded: %s", err)), nil - } - if duration < 0 { - return logical.ErrorResponse(fmt.Sprintf("ocsp_expiry must be greater than or equal to 0 got: %s", duration)), nil - } - config.OcspExpiry = expiry - } - - oldAutoRebuild := config.AutoRebuild - if autoRebuildRaw, ok := d.GetOk("auto_rebuild"); ok { - config.AutoRebuild = autoRebuildRaw.(bool) - } - - if autoRebuildGracePeriodRaw, ok := d.GetOk("auto_rebuild_grace_period"); ok { - autoRebuildGracePeriod := autoRebuildGracePeriodRaw.(string) - if _, err := time.ParseDuration(autoRebuildGracePeriod); err != nil { - return logical.ErrorResponse(fmt.Sprintf("given auto_rebuild_grace_period could not be decoded: %s", err)), nil - } - config.AutoRebuildGracePeriod = autoRebuildGracePeriod - } - - if enableDeltaRaw, ok := d.GetOk("enable_delta"); ok { - config.EnableDelta = enableDeltaRaw.(bool) - } - - if deltaRebuildIntervalRaw, ok := d.GetOk("delta_rebuild_interval"); ok { - deltaRebuildInterval := deltaRebuildIntervalRaw.(string) - if _, err := time.ParseDuration(deltaRebuildInterval); err != nil { - return logical.ErrorResponse(fmt.Sprintf("given delta_rebuild_interval could not be decoded: %s", err)), nil - } - config.DeltaRebuildInterval = deltaRebuildInterval - } - - expiry, _ := time.ParseDuration(config.Expiry) - if config.AutoRebuild { - gracePeriod, _ := time.ParseDuration(config.AutoRebuildGracePeriod) - if gracePeriod >= expiry { - return logical.ErrorResponse(fmt.Sprintf("CRL auto-rebuilding grace period (%v) must be strictly shorter than CRL expiry (%v) value when auto-rebuilding of CRLs is enabled", config.AutoRebuildGracePeriod, config.Expiry)), nil - } - } - - if config.EnableDelta { - deltaRebuildInterval, _ := time.ParseDuration(config.DeltaRebuildInterval) 
- if deltaRebuildInterval >= expiry { - return logical.ErrorResponse(fmt.Sprintf("CRL delta rebuild window (%v) must be strictly shorter than CRL expiry (%v) value when delta CRLs are enabled", config.DeltaRebuildInterval, config.Expiry)), nil - } - } - - if config.EnableDelta && !config.AutoRebuild { - return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil - } - entry, err := logical.StorageEntryJSON("config/crl", config) if err != nil { return nil, err @@ -210,12 +114,8 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return nil, err } - b.crlBuilder.markConfigDirty() - b.crlBuilder.reloadConfigIfRequired(sc) - - if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) { - // It wasn't disabled but now it is (or equivalently, we were set to - // auto-rebuild and we aren't now), so rotate the CRL. + if oldDisable != config.Disable { + // It wasn't disabled but now it is, rotate crlErr := b.crlBuilder.rebuild(ctx, b, req, true) if crlErr != nil { switch crlErr.(type) { diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index 830ca34ad099d..f27d541d6770f 100644 --- a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/asaskevich/govalidator" + "github.com/fatih/structs" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -57,8 +58,8 @@ func validateURLs(urls []string) string { return "" } -func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*certutil.URLEntries, error) { - entry, err := storage.Get(ctx, "urls") +func getURLs(ctx context.Context, req *logical.Request) (*certutil.URLEntries, error) { + entry, err := req.Storage.Get(ctx, "urls") if err != nil { return nil, err } @@ -80,7 +81,7 @@ func 
getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*certutil.U return entries, nil } -func writeURLs(ctx context.Context, storage logical.Storage, entries *certutil.URLEntries) error { +func writeURLs(ctx context.Context, req *logical.Request, entries *certutil.URLEntries) error { entry, err := logical.StorageEntryJSON("urls", entries) if err != nil { return err @@ -89,7 +90,7 @@ func writeURLs(ctx context.Context, storage logical.Storage, entries *certutil.U return fmt.Errorf("unable to marshal entry into JSON") } - err = storage.Put(ctx, entry) + err = req.Storage.Put(ctx, entry) if err != nil { return err } @@ -98,24 +99,20 @@ func writeURLs(ctx context.Context, storage logical.Storage, entries *certutil.U } func (b *backend) pathReadURL(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - entries, err := getGlobalAIAURLs(ctx, req.Storage) + entries, err := getURLs(ctx, req) if err != nil { return nil, err } resp := &logical.Response{ - Data: map[string]interface{}{ - "issuing_certificates": entries.IssuingCertificates, - "crl_distribution_points": entries.CRLDistributionPoints, - "ocsp_servers": entries.OCSPServers, - }, + Data: structs.New(entries).Map(), } return resp, nil } func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entries, err := getGlobalAIAURLs(ctx, req.Storage) + entries, err := getURLs(ctx, req) if err != nil { return nil, err } @@ -124,25 +121,25 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * entries.IssuingCertificates = urlsInt.([]string) if badURL := validateURLs(entries.IssuingCertificates); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( - "invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil + "invalid URL found in issuing certificates: %s", badURL)), nil } } if urlsInt, ok := 
data.GetOk("crl_distribution_points"); ok { entries.CRLDistributionPoints = urlsInt.([]string) if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( - "invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil + "invalid URL found in CRL distribution points: %s", badURL)), nil } } if urlsInt, ok := data.GetOk("ocsp_servers"); ok { entries.OCSPServers = urlsInt.([]string) if badURL := validateURLs(entries.OCSPServers); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( - "invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil + "invalid URL found in OCSP servers: %s", badURL)), nil } } - return nil, writeURLs(ctx, req.Storage, entries) + return nil, writeURLs(ctx, req, entries) } const pathConfigURLsHelpSyn = ` diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go index e0a1079eade76..f4a2a6632c0f1 100644 --- a/builtin/logical/pki/path_fetch.go +++ b/builtin/logical/pki/path_fetch.go @@ -46,7 +46,7 @@ func pathFetchCAChain(b *backend) *framework.Path { // Returns the CRL in raw format func pathFetchCRL(b *backend) *framework.Path { return &framework.Path{ - Pattern: `crl(/pem|/delta(/pem)?)?`, + Pattern: `crl(/pem)?`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ @@ -109,7 +109,7 @@ hyphen-separated octal`, // This returns the CRL in a non-raw format func pathFetchCRLViaCertPath(b *backend) *framework.Path { return &framework.Path{ - Pattern: `cert/(crl|delta-crl)`, + Pattern: `cert/crl`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ @@ -159,63 +159,35 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data response = &logical.Response{ Data: map[string]interface{}{}, } - sc := b.makeStorageContext(ctx, req.Storage) // Some 
of these need to return raw and some non-raw; // this is basically handled by setting contentType or not. // Errors don't cause an immediate exit, because the raw // paths still need to return raw output. - modifiedCtx := &IfModifiedSinceHelper{ - req: req, - issuerRef: defaultRef, - } switch { - case req.Path == "ca" || req.Path == "ca/pem" || req.Path == "cert/ca" || req.Path == "cert/ca/raw" || req.Path == "cert/ca/raw/pem": - modifiedCtx.reqType = ifModifiedCA - ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) - if err != nil || ret { - retErr = err - goto reply - } - + case req.Path == "ca" || req.Path == "ca/pem": serial = "ca" contentType = "application/pkix-cert" - if req.Path == "ca/pem" || req.Path == "cert/ca/raw/pem" { + if req.Path == "ca/pem" { pemType = "CERTIFICATE" contentType = "application/pem-certificate-chain" - } else if req.Path == "cert/ca" { - pemType = "CERTIFICATE" - contentType = "" } case req.Path == "ca_chain" || req.Path == "cert/ca_chain": serial = "ca_chain" if req.Path == "ca_chain" { contentType = "application/pkix-cert" } - case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl": - modifiedCtx.reqType = ifModifiedCRL - if strings.Contains(req.Path, "delta") { - modifiedCtx.reqType = ifModifiedDeltaCRL - } - ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) - if err != nil || ret { - retErr = err - goto reply - } - + case req.Path == "crl" || req.Path == "crl/pem": serial = legacyCRLPath - if req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/delta-crl" { - serial = deltaCRLPath - } contentType = "application/pkix-crl" - if req.Path == "crl/pem" || req.Path == "crl/delta/pem" { + if req.Path == "crl/pem" { pemType = "X509 CRL" contentType = "application/x-pem-file" - } else if req.Path == 
"cert/crl" || req.Path == "cert/delta-crl" { - pemType = "X509 CRL" - contentType = "" } + case req.Path == "cert/crl": + serial = legacyCRLPath + pemType = "X509 CRL" case strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/raw"): serial = data.Get("serial").(string) contentType = "application/pkix-cert" @@ -234,7 +206,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data // Prefer fetchCAInfo to fetchCertBySerial for CA certificates. if serial == "ca_chain" || serial == "ca" { - caInfo, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + caInfo, err := fetchCAInfo(ctx, b, req, defaultRef, ReadOnlyUsage) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go index 2f4402a508e1e..352048783e6b2 100644 --- a/builtin/logical/pki/path_fetch_issuers.go +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -2,11 +2,9 @@ package pki import ( "context" - "crypto/x509" "encoding/pem" "fmt" "strings" - "time" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -36,13 +34,12 @@ func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Reque var responseKeys []string responseInfo := make(map[string]interface{}) - sc := b.makeStorageContext(ctx, req.Storage) - entries, err := sc.listIssuers() + entries, err := listIssuers(ctx, req.Storage) if err != nil { return nil, err } - config, err := sc.getIssuersConfig() + config, err := getIssuersConfig(ctx, req.Storage) if err != nil { return nil, err } @@ -51,7 +48,7 @@ func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Reque // listIssuers), but also the name of the issuer. This means we have to // fetch the actual issuer object as well. 
for _, identifier := range entries { - issuer, err := sc.fetchIssuerById(identifier) + issuer, err := fetchIssuerById(ctx, req.Storage, identifier) if err != nil { return nil, err } @@ -103,36 +100,10 @@ intermediate CAs and "permit" only for root CAs.`, fields["usage"] = &framework.FieldSchema{ Type: framework.TypeCommaStringSlice, Description: `Comma-separated list (or string slice) of usages for -this issuer; valid values are "read-only", "issuing-certificates", -"crl-signing", and "ocsp-signing". Multiple values may be specified. Read-only -is implicit and always set.`, - Default: []string{"read-only", "issuing-certificates", "crl-signing", "ocsp-signing"}, - } - fields["revocation_signature_algorithm"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Which x509.SignatureAlgorithm name to use for -signing CRLs. This parameter allows differentiation between PKCS#1v1.5 -and PSS keys and choice of signature hash algorithm. The default (empty -string) value is for Go to select the signature algorithm. This can fail -if the underlying key does not support the requested signature algorithm, -which may not be known at modification time (such as with PKCS#11 managed -RSA keys).`, - Default: "", - } - fields["issuing_certificates"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, - } - fields["crl_distribution_points"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, - } - fields["ocsp_servers"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Comma-separated list of URLs to be used -for the OCSP servers attribute. 
See also RFC 5280 Section 4.2.2.1.`, +this issuer; valid values are "read-only", "issuing-certificates", and +"crl-signing". Multiple values may be specified. Read-only is implicit +and always set.`, + Default: []string{"read-only", "issuing-certificates", "crl-signing"}, } return &framework.Path{ @@ -184,8 +155,7 @@ func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) + ref, err := resolveIssuerReference(ctx, req.Storage, issuerName) if err != nil { return nil, err } @@ -193,7 +163,7 @@ func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil } - issuer, err := sc.fetchIssuerById(ref) + issuer, err := fetchIssuerById(ctx, req.Storage, ref) if err != nil { return nil, err } @@ -207,40 +177,17 @@ func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { respManualChain = append(respManualChain, string(entity)) } - revSigAlgStr := issuer.RevocationSigAlg.String() - if issuer.RevocationSigAlg == x509.UnknownSignatureAlgorithm { - revSigAlgStr = "" - } - - data := map[string]interface{}{ - "issuer_id": issuer.ID, - "issuer_name": issuer.Name, - "key_id": issuer.KeyID, - "certificate": issuer.Certificate, - "manual_chain": respManualChain, - "ca_chain": issuer.CAChain, - "leaf_not_after_behavior": issuer.LeafNotAfterBehavior.String(), - "usage": issuer.Usage.Names(), - "revocation_signature_algorithm": revSigAlgStr, - "revoked": issuer.Revoked, - "issuing_certificates": []string{}, - "crl_distribution_points": []string{}, - "ocsp_servers": []string{}, - } - - if issuer.Revoked { - data["revocation_time"] = issuer.RevocationTime - data["revocation_time_rfc3339"] = issuer.RevocationTimeUTC.Format(time.RFC3339Nano) - } - - if issuer.AIAURIs != nil { - 
data["issuing_certificates"] = issuer.AIAURIs.IssuingCertificates - data["crl_distribution_points"] = issuer.AIAURIs.CRLDistributionPoints - data["ocsp_servers"] = issuer.AIAURIs.OCSPServers - } - return &logical.Response{ - Data: data, + Data: map[string]interface{}{ + "issuer_id": issuer.ID, + "issuer_name": issuer.Name, + "key_id": issuer.KeyID, + "certificate": issuer.Certificate, + "manual_chain": respManualChain, + "ca_chain": issuer.CAChain, + "leaf_not_after_behavior": issuer.LeafNotAfterBehavior.String(), + "usage": issuer.Usage.Names(), + }, }, nil } @@ -259,8 +206,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) + ref, err := resolveIssuerReference(ctx, req.Storage, issuerName) if err != nil { return nil, err } @@ -268,12 +214,12 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil } - issuer, err := sc.fetchIssuerById(ref) + issuer, err := fetchIssuerById(ctx, req.Storage, ref) if err != nil { return nil, err } - newName, err := getIssuerName(sc, data) + newName, err := getIssuerName(ctx, req.Storage, data) if err != nil && err != errIssuerNameInUse { // If the error is name already in use, and the new name is the // old name for this issuer, we're not actually updating the @@ -309,47 +255,12 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil } - // Revocation signature algorithm changes - revSigAlgStr := data.Get("revocation_signature_algorithm").(string) - revSigAlg, present := certutil.SignatureAlgorithmNames[strings.ToLower(revSigAlgStr)] - if !present && 
revSigAlgStr != "" { - var knownAlgos []string - for algoName := range certutil.SignatureAlgorithmNames { - knownAlgos = append(knownAlgos, algoName) - } - - return logical.ErrorResponse(fmt.Sprintf("Unknown signature algorithm value: %v - valid values are %v", revSigAlg, strings.Join(knownAlgos, ", "))), nil - } else if revSigAlgStr == "" { - revSigAlg = x509.UnknownSignatureAlgorithm - } - if err := issuer.CanMaybeSignWithAlgo(revSigAlg); err != nil { - return nil, err - } - - // AIA access changes - issuerCertificates := data.Get("issuing_certificates").([]string) - if badURL := validateURLs(issuerCertificates); badURL != "" { - return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil - } - crlDistributionPoints := data.Get("crl_distribution_points").([]string) - if badURL := validateURLs(crlDistributionPoints); badURL != "" { - return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil - } - ocspServers := data.Get("ocsp_servers").([]string) - if badURL := validateURLs(ocspServers); badURL != "" { - return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil - } - modified := false var oldName string if newName != issuer.Name { oldName = issuer.Name issuer.Name = newName - issuer.LastModified = time.Now().UTC() - // See note in updateDefaultIssuerId about why this is necessary. - b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) modified = true } @@ -359,73 +270,10 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { - // Forbid allowing cert signing on its usage. 
- return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil - } - - // Ensure we deny adding CRL usage if the bits are missing from the - // cert itself. - cert, err := issuer.GetCertificate() - if err != nil { - return nil, fmt.Errorf("unable to parse issuer's certificate: %v", err) - } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { - return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil - } - issuer.Usage = newUsage modified = true } - if revSigAlg != issuer.RevocationSigAlg { - issuer.RevocationSigAlg = revSigAlg - modified = true - } - - if issuer.AIAURIs == nil && (len(issuerCertificates) > 0 || len(crlDistributionPoints) > 0 || len(ocspServers) > 0) { - issuer.AIAURIs = &certutil.URLEntries{} - } - if issuer.AIAURIs != nil { - // Associative mapping from data source to destination on the - // backing issuer object. - type aiaPair struct { - Source *[]string - Dest *[]string - } - pairs := []aiaPair{ - { - Source: &issuerCertificates, - Dest: &issuer.AIAURIs.IssuingCertificates, - }, - { - Source: &crlDistributionPoints, - Dest: &issuer.AIAURIs.CRLDistributionPoints, - }, - { - Source: &ocspServers, - Dest: &issuer.AIAURIs.OCSPServers, - }, - } - - // For each pair, if it is different on the object, update it. - for _, pair := range pairs { - if isStringArrayDifferent(*pair.Source, *pair.Dest) { - *pair.Dest = *pair.Source - modified = true - } - } - - // If no AIA URLs exist on the issuer, set the AIA URLs entry to nil - // to ease usage later. 
- if len(issuer.AIAURIs.IssuingCertificates) == 0 && len(issuer.AIAURIs.CRLDistributionPoints) == 0 && len(issuer.AIAURIs.OCSPServers) == 0 { - issuer.AIAURIs = nil - } - } - - // Updating the chain should be the last modification as there's a chance - // it'll write it out to disk for us. We'd hate to then modify the issuer - // again and write it a second time. var updateChain bool var constructedChain []issuerID for index, newPathRef := range newPath { @@ -434,7 +282,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da newPathRef = string(ref) } - resolvedId, err := sc.resolveIssuerReference(newPathRef) + resolvedId, err := resolveIssuerReference(ctx, req.Storage, newPathRef) if err != nil { return nil, err } @@ -459,14 +307,14 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // Building the chain will write the issuer to disk; no need to do it // twice. modified = false - err := sc.rebuildIssuersChains(issuer) + err := rebuildIssuersChains(ctx, req.Storage, issuer) if err != nil { return nil, err } } if modified { - err := sc.writeIssuer(issuer) + err := writeIssuer(ctx, req.Storage, issuer) if err != nil { return nil, err } @@ -474,7 +322,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da response, err := respondReadIssuer(issuer) if newName != oldName { - addWarningOnDereferencing(sc, oldName, response) + addWarningOnDereferencing(oldName, response, ctx, req.Storage) } return response, err @@ -496,8 +344,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) + ref, err := resolveIssuerReference(ctx, req.Storage, issuerName) if err != nil { return nil, err } @@ -505,7 +352,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat return 
logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil } - issuer, err := sc.fetchIssuerById(ref) + issuer, err := fetchIssuerById(ctx, req.Storage, ref) if err != nil { return nil, err } @@ -518,7 +365,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat var oldName string var newName string if ok { - newName, err = getIssuerName(sc, data) + newName, err = getIssuerName(ctx, req.Storage, data) if err != nil && err != errIssuerNameInUse { // If the error is name already in use, and the new name is the // old name for this issuer, we're not actually updating the @@ -536,10 +383,6 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if newName != issuer.Name { oldName = issuer.Name issuer.Name = newName - issuer.LastModified = time.Now().UTC() - // See note in updateDefaultIssuerId about why this is necessary. - b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) modified = true } } @@ -574,100 +417,11 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { - // Forbid allowing cert signing on its usage. - return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. 
Reissue this certificate (preferably with a new key) and modify that entry instead."), nil - } - - cert, err := issuer.GetCertificate() - if err != nil { - return nil, fmt.Errorf("unable to parse issuer's certificate: %v", err) - } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { - return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil - } - issuer.Usage = newUsage modified = true } } - // Revocation signature algorithm changes - rawRevSigAlg, ok := data.GetOk("revocation_signature_algorithm") - if ok { - revSigAlgStr := rawRevSigAlg.(string) - revSigAlg, present := certutil.SignatureAlgorithmNames[strings.ToLower(revSigAlgStr)] - if !present && revSigAlgStr != "" { - var knownAlgos []string - for algoName := range certutil.SignatureAlgorithmNames { - knownAlgos = append(knownAlgos, algoName) - } - - return logical.ErrorResponse(fmt.Sprintf("Unknown signature algorithm value: %v - valid values are %v", revSigAlg, strings.Join(knownAlgos, ", "))), nil - } else if revSigAlgStr == "" { - revSigAlg = x509.UnknownSignatureAlgorithm - } - - if err := issuer.CanMaybeSignWithAlgo(revSigAlg); err != nil { - return nil, err - } - - if revSigAlg != issuer.RevocationSigAlg { - issuer.RevocationSigAlg = revSigAlg - modified = true - } - } - - // AIA access changes. - if issuer.AIAURIs == nil { - issuer.AIAURIs = &certutil.URLEntries{} - } - - // Associative mapping from data source to destination on the - // backing issuer object. For PATCH requests, we use the source - // data parameter as we still need to validate them and process - // it into a string list. 
- type aiaPair struct { - Source string - Dest *[]string - } - pairs := []aiaPair{ - { - Source: "issuing_certificates", - Dest: &issuer.AIAURIs.IssuingCertificates, - }, - { - Source: "crl_distribution_points", - Dest: &issuer.AIAURIs.CRLDistributionPoints, - }, - { - Source: "ocsp_servers", - Dest: &issuer.AIAURIs.OCSPServers, - }, - } - - // For each pair, if it is different on the object, update it. - for _, pair := range pairs { - rawURLsValue, ok := data.GetOk(pair.Source) - if ok { - urlsValue := rawURLsValue.([]string) - if badURL := validateURLs(urlsValue); badURL != "" { - return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter %v: %s", pair.Source, badURL)), nil - } - - if isStringArrayDifferent(urlsValue, *pair.Dest) { - modified = true - *pair.Dest = urlsValue - } - } - } - - // If no AIA URLs exist on the issuer, set the AIA URLs entry to nil to - // ease usage later. - if len(issuer.AIAURIs.IssuingCertificates) == 0 && len(issuer.AIAURIs.CRLDistributionPoints) == 0 && len(issuer.AIAURIs.OCSPServers) == 0 { - issuer.AIAURIs = nil - } - // Manual Chain Changes newPathData, ok := data.GetOk("manual_chain") if ok { @@ -680,7 +434,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat newPathRef = string(ref) } - resolvedId, err := sc.resolveIssuerReference(newPathRef) + resolvedId, err := resolveIssuerReference(ctx, req.Storage, newPathRef) if err != nil { return nil, err } @@ -705,7 +459,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat // Building the chain will write the issuer to disk; no need to do it // twice. 
modified = false - err := sc.rebuildIssuersChains(issuer) + err := rebuildIssuersChains(ctx, req.Storage, issuer) if err != nil { return nil, err } @@ -713,7 +467,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat } if modified { - err := sc.writeIssuer(issuer) + err := writeIssuer(ctx, req.Storage, issuer) if err != nil { return nil, err } @@ -721,7 +475,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat response, err := respondReadIssuer(issuer) if newName != oldName { - addWarningOnDereferencing(sc, oldName, response) + addWarningOnDereferencing(oldName, response, ctx, req.Storage) } return response, err @@ -737,8 +491,7 @@ func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, da return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) + ref, err := resolveIssuerReference(ctx, req.Storage, issuerName) if err != nil { return nil, err } @@ -746,25 +499,14 @@ func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, da return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil } - issuer, err := sc.fetchIssuerById(ref) + issuer, err := fetchIssuerById(ctx, req.Storage, ref) if err != nil { return nil, err } - var contentType string - var certificate []byte - - response := &logical.Response{} - ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: ifModifiedCA, issuerRef: ref}, sc, response) - if err != nil { - return nil, err - } - if ret { - return response, nil - } - - certificate = []byte(issuer.Certificate) + certificate := []byte(issuer.Certificate) + var contentType string if strings.HasSuffix(req.Path, "/pem") { contentType = "application/pem-certificate-chain" } else if strings.HasSuffix(req.Path, "/der") { @@ -818,8 +560,7 @@ func (b *backend) pathDeleteIssuer(ctx 
context.Context, req *logical.Request, da return logical.ErrorResponse("missing issuer reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) + ref, err := resolveIssuerReference(ctx, req.Storage, issuerName) if err != nil { // Return as if we deleted it if we fail to lookup the issuer. if ref == IssuerRefNotFound { @@ -830,28 +571,28 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da response := &logical.Response{} - issuer, err := sc.fetchIssuerById(ref) + issuer, err := fetchIssuerById(ctx, req.Storage, ref) if err != nil { return nil, err } if issuer.Name != "" { - addWarningOnDereferencing(sc, issuer.Name, response) + addWarningOnDereferencing(issuer.Name, response, ctx, req.Storage) } - addWarningOnDereferencing(sc, string(issuer.ID), response) + addWarningOnDereferencing(string(issuer.ID), response, ctx, req.Storage) - wasDefault, err := sc.deleteIssuer(ref) + wasDefault, err := deleteIssuer(ctx, req.Storage, ref) if err != nil { return nil, err } if wasDefault { response.AddWarning(fmt.Sprintf("Deleted issuer %v (via issuer_ref %v); this was configured as the default issuer. Operations without an explicit issuer will not work until a new default is configured.", ref, issuerName)) - addWarningOnDereferencing(sc, defaultRef, response) + addWarningOnDereferencing(defaultRef, response, ctx, req.Storage) } // Since we've deleted an issuer, the chains might've changed. Call the // rebuild code. We shouldn't technically err (as the issuer was deleted // successfully), but log a warning (and to the response) if this fails. 
- if err := sc.rebuildIssuersChains(nil); err != nil { + if err := rebuildIssuersChains(ctx, req.Storage, nil); err != nil { msg := fmt.Sprintf("Failed to rebuild remaining issuers' chains: %v", err) b.Logger().Error(msg) response.AddWarning(msg) @@ -860,8 +601,8 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da return response, nil } -func addWarningOnDereferencing(sc *storageContext, name string, resp *logical.Response) { - timeout, inUseBy, err := sc.checkForRolesReferencing(name) +func addWarningOnDereferencing(name string, resp *logical.Response, ctx context.Context, s logical.Storage) { + timeout, inUseBy, err := checkForRolesReferencing(name, ctx, s) if err != nil || timeout { if inUseBy == 0 { resp.AddWarning(fmt.Sprint("Unable to check if any roles referenced this issuer by ", name)) @@ -894,7 +635,7 @@ the certificate. ) func pathGetIssuerCRL(b *backend) *framework.Path { - pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der)?" 
return buildPathGetIssuerCRL(b, pattern) } @@ -932,40 +673,22 @@ func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, da return nil, err } - var certificate []byte - var contentType string - - sc := b.makeStorageContext(ctx, req.Storage) - response := &logical.Response{} - var crlType ifModifiedReqType = ifModifiedCRL - if strings.Contains(req.Path, "delta") { - crlType = ifModifiedDeltaCRL - } - ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: crlType}, sc, response) + crlPath, err := resolveIssuerCRLPath(ctx, b, req.Storage, issuerName) if err != nil { return nil, err } - if ret { - return response, nil - } - crlPath, err := sc.resolveIssuerCRLPath(issuerName) - if err != nil { - return nil, err - } - - if strings.Contains(req.Path, "delta") { - crlPath += deltaCRLPathSuffix - } crlEntry, err := req.Storage.Get(ctx, crlPath) if err != nil { return nil, err } + var certificate []byte if crlEntry != nil && len(crlEntry.Value) > 0 { certificate = []byte(crlEntry.Value) } + var contentType string if strings.HasSuffix(req.Path, "/der") { contentType = "application/pkix-crl" } else if strings.HasSuffix(req.Path, "/pem") { diff --git a/builtin/logical/pki/path_fetch_keys.go b/builtin/logical/pki/path_fetch_keys.go index 2e718240dc8d4..8ce99b81f6753 100644 --- a/builtin/logical/pki/path_fetch_keys.go +++ b/builtin/logical/pki/path_fetch_keys.go @@ -41,19 +41,18 @@ func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, var responseKeys []string responseInfo := make(map[string]interface{}) - sc := b.makeStorageContext(ctx, req.Storage) - entries, err := sc.listKeys() + entries, err := listKeys(ctx, req.Storage) if err != nil { return nil, err } - config, err := sc.getKeysConfig() + config, err := getKeysConfig(ctx, req.Storage) if err != nil { return nil, err } for _, identifier := range entries { - key, err := sc.fetchKeyById(identifier) + key, err := fetchKeyById(ctx, req.Storage, 
identifier) if err != nil { return nil, err } @@ -135,8 +134,7 @@ func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, d return logical.ErrorResponse("missing key reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - keyId, err := sc.resolveKeyReference(keyRef) + keyId, err := resolveKeyReference(ctx, req.Storage, keyRef) if err != nil { return nil, err } @@ -144,7 +142,7 @@ func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, d return logical.ErrorResponse("unable to resolve key id for reference" + keyRef), nil } - key, err := sc.fetchKeyById(keyId) + key, err := fetchKeyById(ctx, req.Storage, keyId) if err != nil { return nil, err } @@ -191,8 +189,7 @@ func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse("missing key reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - keyId, err := sc.resolveKeyReference(keyRef) + keyId, err := resolveKeyReference(ctx, req.Storage, keyRef) if err != nil { return nil, err } @@ -200,7 +197,7 @@ func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse("unable to resolve key id for reference" + keyRef), nil } - key, err := sc.fetchKeyById(keyId) + key, err := fetchKeyById(ctx, req.Storage, keyId) if err != nil { return nil, err } @@ -213,7 +210,7 @@ func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request if newName != key.Name { key.Name = newName - err := sc.writeKey(*key) + err := writeKey(ctx, req.Storage, *key) if err != nil { return nil, err } @@ -249,8 +246,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse("missing key reference"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - keyId, err := sc.resolveKeyReference(keyRef) + keyId, err := resolveKeyReference(ctx, req.Storage, keyRef) if err != nil { if keyId == KeyRefNotFound { // We failed to 
lookup the key, we should ignore any error here and reply as if it was deleted. @@ -259,7 +255,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request return nil, err } - keyInUse, issuerId, err := sc.isKeyInUse(keyId.String()) + keyInUse, issuerId, err := isKeyInUse(keyId.String(), ctx, req.Storage) if err != nil { return nil, err } @@ -267,7 +263,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse(fmt.Sprintf("Failed to Delete Key. Key in Use by Issuer: %s", issuerId)), nil } - wasDefault, err := sc.deleteKey(keyId) + wasDefault, err := deleteKey(ctx, req.Storage, keyId) if err != nil { return nil, err } diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index c875842219dab..bc388f4008869 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -66,26 +66,20 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req // Nasty hack part two. :-) For generation of CSRs, certutil presently doesn't // support configuration of this. However, because we need generation parameters, // which create a role and attempt to read this parameter, we need to provide - // a value (which will be ignored). Hence, we stub in the missing parameters here, + // a value (which will be ignored). Hence, we stub in the missing parameter here, // including its schema, just enough for it to work.. 
data.Schema["signature_bits"] = &framework.FieldSchema{ Type: framework.TypeInt, Default: 0, } data.Raw["signature_bits"] = 0 - data.Schema["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - } - data.Raw["use_pss"] = false - sc := b.makeStorageContext(ctx, req.Storage) - exported, format, role, errorResp := getGenerationParams(sc, data) + exported, format, role, errorResp := b.getGenerationParams(ctx, req.Storage, data) if errorResp != nil { return errorResp, nil } - keyName, err := getKeyName(sc, data) + keyName, err := getKeyName(ctx, req.Storage, data) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -95,7 +89,7 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req req: req, apiData: data, } - parsedBundle, warnings, err := generateIntermediateCSR(sc, input, b.Backend.GetRandomReader()) + parsedBundle, err := generateIntermediateCSR(ctx, b, input, b.Backend.GetRandomReader()) if err != nil { switch err.(type) { case errutil.UserError: @@ -114,12 +108,12 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req Data: map[string]interface{}{}, } - entries, err := getGlobalAIAURLs(ctx, req.Storage) + entries, err := getURLs(ctx, req) if err == nil && len(entries.OCSPServers) == 0 && len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 { // If the operator hasn't configured any of the URLs prior to // generating this issuer, we should add a warning to the response, // informing them they might want to do so and re-generate the issuer. - resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information. 
Since this certificate is an intermediate, it might be useful to regenerate this certificate after fixing this problem for the root mount.") + resp.AddWarning("This mount hasn't configured any authority access information fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls with this information.") } switch format { @@ -155,14 +149,12 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req } } - myKey, _, err := sc.importKey(csrb.PrivateKey, keyName, csrb.PrivateKeyType) + myKey, _, err := importKey(ctx, b, req.Storage, csrb.PrivateKey, keyName, csrb.PrivateKeyType) if err != nil { return nil, err } resp.Data["key_id"] = myKey.ID - resp = addWarnings(resp, warnings) - return resp, nil } diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index 7203d56c73cf0..17cdc93d835cc 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -1,11 +1,9 @@ package pki import ( - "bytes" "context" "crypto/rand" "encoding/base64" - "encoding/pem" "fmt" "strings" "time" @@ -141,25 +139,6 @@ this value to an empty list.`, Description: `A comma-separated string or list of extended key usage oids.`, } - ret.Fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - ret.Fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. 
Defaults to false.`, - } - return ret } @@ -214,13 +193,10 @@ func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, da AllowedOtherSANs: []string{"*"}, AllowedSerialNumbers: []string{"*"}, AllowedURISANs: []string{"*"}, - CNValidations: []string{"disabled"}, GenerateLease: new(bool), KeyUsage: data.Get("key_usage").([]string), ExtKeyUsage: data.Get("ext_key_usage").([]string), ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), - SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), } *entry.AllowWildcardCertificates = true @@ -289,8 +265,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } var caErr error - sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := fetchCAInfo(ctx, b, req, issuerName, IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -309,11 +284,10 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } var parsedBundle *certutil.ParsedCertBundle var err error - var warnings []string if useCSR { - parsedBundle, warnings, err = signCert(b, input, signingBundle, false, useCSRValues) + parsedBundle, err = signCert(b, input, signingBundle, false, useCSRValues) } else { - parsedBundle, warnings, err = generateCert(sc, input, signingBundle, false, rand.Reader) + parsedBundle, err = generateCert(ctx, b, input, signingBundle, false, rand.Reader) } if err != nil { switch err.(type) { @@ -336,8 +310,6 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) } - caChainGen := newCaChainOutput(parsedBundle, data) - respData := map[string]interface{}{ "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), "serial_number": cb.SerialNumber, @@ -347,8 +319,8 @@ func (b *backend) pathIssueSignCert(ctx 
context.Context, req *logical.Request, d case "pem": respData["issuing_ca"] = signingCB.Certificate respData["certificate"] = cb.Certificate - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() + if cb.CAChain != nil && len(cb.CAChain) > 0 { + respData["ca_chain"] = cb.CAChain } if !useCSR { respData["private_key"] = cb.PrivateKey @@ -358,8 +330,8 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d case "pem_bundle": respData["issuing_ca"] = signingCB.Certificate respData["certificate"] = cb.ToPEMBundle() - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() + if cb.CAChain != nil && len(cb.CAChain) > 0 { + respData["ca_chain"] = cb.CAChain } if !useCSR { respData["private_key"] = cb.PrivateKey @@ -370,8 +342,12 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.derEncodedChain() + var caChain []string + for _, caCert := range parsedBundle.CAChain { + caChain = append(caChain, base64.StdEncoding.EncodeToString(caCert.Bytes)) + } + if caChain != nil && len(caChain) > 0 { + respData["ca_chain"] = caChain } if !useCSR { @@ -386,7 +362,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d switch { case role.GenerateLease == nil: return nil, fmt.Errorf("generate lease in role is nil") - case !*role.GenerateLease: + case *role.GenerateLease == false: // If lease generation is disabled do not populate `Secret` field in // the response resp = &logical.Response{ @@ -409,16 +385,13 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } if !role.NoStore { - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := 
b.certsCounted.Load() err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, + Key: "certs/" + normalizeSerial(cb.SerialNumber), Value: parsedBundle.CertificateBytes, }) if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.incrementTotalCertificatesCount(certsCounted, key) } if useCSR { @@ -430,55 +403,9 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } } - resp = addWarnings(resp, warnings) - return resp, nil } -type caChainOutput struct { - chain []*certutil.CertBlock -} - -func newCaChainOutput(parsedBundle *certutil.ParsedCertBundle, data *framework.FieldData) caChainOutput { - if filterCaChain := data.Get("remove_roots_from_chain").(bool); filterCaChain { - var myChain []*certutil.CertBlock - for _, certBlock := range parsedBundle.CAChain { - cert := certBlock.Certificate - - if (len(cert.AuthorityKeyId) > 0 && !bytes.Equal(cert.AuthorityKeyId, cert.SubjectKeyId)) || - (len(cert.AuthorityKeyId) == 0 && (!bytes.Equal(cert.RawIssuer, cert.RawSubject) || cert.CheckSignatureFrom(cert) != nil)) { - // We aren't self-signed so add it to the list. 
- myChain = append(myChain, certBlock) - } - } - return caChainOutput{chain: myChain} - } - - return caChainOutput{chain: parsedBundle.CAChain} -} - -func (cac *caChainOutput) containsChain() bool { - return len(cac.chain) > 0 -} - -func (cac *caChainOutput) pemEncodedChain() []string { - var chain []string - for _, cert := range cac.chain { - block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Bytes} - certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) - chain = append(chain, certificate) - } - return chain -} - -func (cac *caChainOutput) derEncodedChain() []string { - var derCaChain []string - for _, caCert := range cac.chain { - derCaChain = append(derCaChain, base64.StdEncoding.EncodeToString(caCert.Bytes)) - } - return derCaChain -} - const pathIssueHelpSyn = ` Request a certificate using a certain role with the provided details. ` diff --git a/builtin/logical/pki/path_manage_issuers.go b/builtin/logical/pki/path_manage_issuers.go index a22df03a40bba..32fc3374e4039 100644 --- a/builtin/logical/pki/path_manage_issuers.go +++ b/builtin/logical/pki/path_manage_issuers.go @@ -3,14 +3,11 @@ package pki import ( "bytes" "context" - "crypto/x509" "encoding/pem" "fmt" "strings" - "time" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -85,7 +82,6 @@ with Active Directory Certificate Services.`, // signed certificate's bits (that's on the /sign-intermediate // endpoints). Remove it from the list of fields to avoid confusion. delete(ret.Fields, "signature_bits") - delete(ret.Fields, "use_pss") return ret } @@ -211,11 +207,9 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d return logical.ErrorResponse("private keys found in the PEM bundle but not allowed by the path; use /issuers/import/bundle"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - for keyIndex, keyPem := range keys { // Handle import of private key. 
- key, existing, err := importKeyFromBytes(sc, keyPem, "") + key, existing, err := importKeyFromBytes(ctx, b, req.Storage, keyPem, "") if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error parsing key %v: %v", keyIndex, err)), nil } @@ -226,7 +220,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d } for certIndex, certPem := range issuers { - cert, existing, err := sc.importIssuer(certPem, "") + cert, existing, err := importIssuer(ctx, b, req.Storage, certPem, "") if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error parsing issuer %v: %v\n%v", certIndex, err, certPem)), nil } @@ -248,13 +242,6 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d if len(createdIssuers) > 0 { err := b.crlBuilder.rebuild(ctx, b, req, true) if err != nil { - // Before returning, check if the error message includes the - // string "PSS". If so, it indicates we might've wanted to modify - // this issuer, so convert the error to a warning. - if strings.Contains(err.Error(), "PSS") || strings.Contains(err.Error(), "pss") { - err = fmt.Errorf("Rebuilding the CRL failed with a message relating to the PSS signature algorithm. This likely means the revocation_signature_algorithm needs to be set on the newly imported issuer(s) because a managed key supports only the PSS algorithm; by default PKCS#1v1.5 was used to build the CRLs. CRLs will not be generated until this has been addressed, however the import was successful. The original error is reproduced below:\n\n\t%v", err) - } - return nil, err } } @@ -263,7 +250,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d // do this unconditionally if the issuer or key was modified, so the admin // is always warned. But if unrelated key material was imported, we do // not warn. 
- config, err := sc.getIssuersConfig() + config, err := getIssuersConfig(ctx, req.Storage) if err == nil && len(config.DefaultIssuerId) > 0 { // We can use the mapping above to check the issuer mapping. if keyId, ok := issuerKeyMap[string(config.DefaultIssuerId)]; ok && len(keyId) == 0 { @@ -297,13 +284,6 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d } } - // Also while we're here, we should let the user know the next steps. - // In particular, if there's no default AIA URLs configuration, we should - // tell the user that's probably next. - if entries, err := getGlobalAIAURLs(ctx, req.Storage); err == nil && len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0 { - response.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information.") - } - return response, nil } @@ -321,227 +301,3 @@ either take PEM-formatted certificates, and, if :type="bundle", unencrypted secret-keys. 
` ) - -func pathRevokeIssuer(b *backend) *framework.Path { - fields := addIssuerRefField(map[string]*framework.FieldSchema{}) - - return &framework.Path{ - Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/revoke", - Fields: fields, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathRevokeIssuer, - // Read more about why these flags are set in backend.go - ForwardPerformanceStandby: true, - ForwardPerformanceSecondary: true, - }, - }, - - HelpSynopsis: pathRevokeIssuerHelpSyn, - HelpDescription: pathRevokeIssuerHelpDesc, - } -} - -func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // Since we're planning on updating issuers here, grab the lock so we've - // got a consistent view. - b.issuersLock.Lock() - defer b.issuersLock.Unlock() - - // Issuer revocation can't work on the legacy cert bundle. - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("cannot revoke issuer until migration has completed"), nil - } - - issuerName := getIssuerRef(data) - if len(issuerName) == 0 { - return logical.ErrorResponse("missing issuer reference"), nil - } - - // Fetch the issuer. - sc := b.makeStorageContext(ctx, req.Storage) - ref, err := sc.resolveIssuerReference(issuerName) - if err != nil { - return nil, err - } - if ref == "" { - return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil - } - - issuer, err := sc.fetchIssuerById(ref) - if err != nil { - return nil, err - } - - // If its already been revoked, just return the read results sans warnings - // like we would otherwise. - if issuer.Revoked { - return respondReadIssuer(issuer) - } - - // When revoking, we want to forbid new certificate issuance. We allow - // new revocations of leaves issued by this issuer to trigger a CRL - // rebuild still. 
- issuer.Revoked = true - if issuer.Usage.HasUsage(IssuanceUsage) { - issuer.Usage.ToggleUsage(IssuanceUsage) - } - - currTime := time.Now() - issuer.RevocationTime = currTime.Unix() - issuer.RevocationTimeUTC = currTime.UTC() - - err = sc.writeIssuer(issuer) - if err != nil { - return nil, err - } - - // Now, if the parent issuer exists within this mount, we'd have written - // a storage entry for this certificate, making it appear as any other - // leaf. We need to add a revocationInfo entry for this into storage, - // so that it appears as if it was revoked. - // - // This is a _necessary_ but not necessarily _sufficient_ step to - // consider an arbitrary issuer revoked and the former step (setting - // issuer.Revoked = true) is more correct: if two intermediates have the - // same serial number, and one appears somehow in the storage but from a - // different issuer, we'd only include one in the CRLs, but we'd want to - // include both in two separate CRLs. Hence, the former is the condition - // we check in CRL building, but this step satisfies other guarantees - // within Vault. - certEntry, err := fetchCertBySerial(ctx, b, req, "certs/", issuer.SerialNumber) - if err == nil && certEntry != nil { - // We've inverted this error check as it doesn't matter; we already - // consider this certificate revoked. - storageCert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("error parsing stored certificate value: %v", err) - } - - issuerCert, err := issuer.GetCertificate() - if err != nil { - return nil, fmt.Errorf("error parsing issuer certificate value: %v", err) - } - - if bytes.Equal(issuerCert.Raw, storageCert.Raw) { - // If the issuer is on disk at its serial number is the same as - // our issuer, we know we can write the revocation entry. 
Since - // Vault has historically forbid revocation of non-stored certs - // and issuers, we're the only ones to write this entry, so we - // don't need the write guard that exists in crl_util.go for the - // general case (forbidding a newer revocation time). - // - // We'll let a cleanup pass or CRL build identify the issuer for - // us. - revInfo := revocationInfo{ - CertificateBytes: issuerCert.Raw, - RevocationTime: issuer.RevocationTime, - RevocationTimeUTC: issuer.RevocationTimeUTC, - } - - revEntry, err := logical.StorageEntryJSON(revokedPath+normalizeSerial(issuer.SerialNumber), revInfo) - if err != nil { - return nil, fmt.Errorf("error creating revocation entry for issuer: %v", err) - } - - err = req.Storage.Put(ctx, revEntry) - if err != nil { - return nil, fmt.Errorf("error saving revoked issuer to new location: %v", err) - } - } - } - - // Rebuild the CRL to include the newly revoked issuer. - crlErr := b.crlBuilder.rebuild(ctx, b, req, false) - if crlErr != nil { - switch crlErr.(type) { - case errutil.UserError: - return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil - default: - return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) - } - } - - // Finally, respond with the issuer's updated data. - response, err := respondReadIssuer(issuer) - if err != nil { - // Impossible. - return nil, err - } - - // For sanity, we'll add a warning message here if there's no other - // issuer which verifies this issuer. 
- ourCert, err := issuer.GetCertificate() - if err != nil { - return nil, err - } - - allIssuers, err := sc.listIssuers() - if err != nil { - return nil, err - } - - isSelfSigned := false - haveOtherIssuer := false - for _, candidateID := range allIssuers { - candidate, err := sc.fetchIssuerById(candidateID) - if err != nil { - return nil, err - } - - candidateCert, err := candidate.GetCertificate() - if err != nil { - // Returning this error is fine because more things will fail - // if this issuer can't parse. - return nil, err - } - - if err := ourCert.CheckSignatureFrom(candidateCert); err == nil { - // Signature verification is a success. This means we have a - // parent for this cert. But notice above we didn't filter out - // ourselves: we want to see if this is a self-signed cert. So - // check that now. - if candidate.ID == issuer.ID { - isSelfSigned = true - } else { - haveOtherIssuer = true - } - } - - // If we have both possible warning candidates, no sense continuing - // to check signatures; exit. - if isSelfSigned && haveOtherIssuer { - break - } - } - - if isSelfSigned { - response.AddWarning("This issuer is a self-signed (potentially root) certificate. This means it may not be considered revoked if there is not an external, cross-signed variant of this certificate. This issuer's serial number will not appear on its own CRL.") - } - - if !haveOtherIssuer { - response.AddWarning("This issuer lacks another parent issuer within the mount. This means it will not appear on any other CRLs and may not be considered revoked by clients. 
Consider adding this issuer to its issuer's CRL as well if it is not self-signed.") - } - - config, err := sc.getIssuersConfig() - if err == nil && config != nil && config.DefaultIssuerId == issuer.ID { - response.AddWarning("This issuer is currently configured as the default issuer for this mount; operations such as certificate issuance may not work until a new default issuer is selected.") - } - - return response, nil -} - -const ( - pathRevokeIssuerHelpSyn = `Revoke the specified issuer certificate.` - pathRevokeIssuerHelpDesc = ` -This endpoint allows revoking the specified issuer certificates. - -This is useful when the issuer and its parent exist within the same PKI -mount point (utilizing the multi-issuer functionality). If no suitable -parent is found, this revocation may not appear on any CRL in this mount. - -Once revoked, issuers cannot be unrevoked and may not be used to sign any -more certificates. -` -) diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go index 90119ce4e8a1e..264508e348600 100644 --- a/builtin/logical/pki/path_manage_keys.go +++ b/builtin/logical/pki/path_manage_keys.go @@ -80,8 +80,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque return logical.ErrorResponse("Can not generate keys until migration has completed"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - keyName, err := getKeyName(sc, data) + keyName, err := getKeyName(ctx, req.Storage, data) if err != nil { // Fail Immediately if Key Name is in Use, etc... 
return logical.ErrorResponse(err.Error()), nil } @@ -128,7 +127,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque return nil, err } - key, _, err := sc.importKey(privateKeyPemString, keyName, keyBundle.PrivateKeyType) + key, _, err := importKey(ctx, b, req.Storage, privateKeyPemString, keyName, keyBundle.PrivateKeyType) if err != nil { return nil, err } @@ -189,9 +188,8 @@ func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse("Cannot import keys until migration has completed"), nil } - sc := b.makeStorageContext(ctx, req.Storage) pemBundle := data.Get("pem_bundle").(string) - keyName, err := getKeyName(sc, data) + keyName, err := getKeyName(ctx, req.Storage, data) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -231,7 +229,7 @@ func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request return logical.ErrorResponse("only a single key can be present within the pem_bundle for importing"), nil } - key, existed, err := importKeyFromBytes(sc, keys[0], keyName) + key, existed, err := importKeyFromBytes(ctx, b, req.Storage, keys[0], keyName) if err != nil { return logical.ErrorResponse(err.Error()), nil } diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go index 979f8a203d4ab..b8a67d073c897 100644 --- a/builtin/logical/pki/path_manage_keys_test.go +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -16,7 +16,6 @@ import ( ) func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) tests := []struct { @@ -34,7 +33,6 @@ func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { {"error-bad-type", "dskjfkdsfjdkf", []int{0}, true}, } for _, tt := range tests { - tt := tt for _, keyBitParam := range tt.keyBits { keyName := fmt.Sprintf("%s-%d", tt.name, keyBitParam) t.Run(keyName, func(t *testing.T) { @@ -81,7 +79,6 @@ func 
TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { } func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { - t.Parallel() // We tested a lot of the logic above within the internal test, so just make sure we honor the exported contract b, s := createBackendWithStorage(t) @@ -114,7 +111,6 @@ func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { } func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) @@ -247,7 +243,6 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { } func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -275,7 +270,6 @@ func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { } func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -302,7 +296,6 @@ func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { } func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -355,7 +348,6 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { } func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -389,7 +381,6 @@ func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { } func TestPKI_PathManageKeys_ImportKeyRejectsMultipleKeys(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) @@ -418,10 +409,9 @@ func TestPKI_PathManageKeys_ImportKeyRejectsMultipleKeys(t *testing.T) 
{ require.True(t, resp.IsError(), "should have received an error response importing a pem bundle with more than 1 key") ctx := context.Background() - sc := b.makeStorageContext(ctx, s) - keys, _ := sc.listKeys() + keys, _ := listKeys(ctx, s) for _, keyId := range keys { - id, _ := sc.fetchKeyById(keyId) + id, _ := fetchKeyById(ctx, s, keyId) t.Logf("%s:%s", id.ID, id.Name) } } diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index d33c013caf9f3..52c5c63e7837c 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -2,17 +2,10 @@ package pki import ( "context" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "encoding/pem" "fmt" "strings" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" @@ -27,47 +20,6 @@ func pathRevoke(b *backend) *framework.Path { Description: `Certificate serial number, in colon- or hyphen-separated octal`, }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate to revoke in PEM format; must be -signed by an issuer in this mount.`, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.metricsWrap("revoke", noRole, b.pathRevokeWrite), - // This should never be forwarded. See backend.go for more information. - // If this needs to write, the entire request will be forwarded to the - // active node of the current performance cluster, but we don't want to - // forward invalid revoke requests there. 
- }, - }, - - HelpSynopsis: pathRevokeHelpSyn, - HelpDescription: pathRevokeHelpDesc, - } -} - -func pathRevokeWithKey(b *backend) *framework.Path { - return &framework.Path{ - Pattern: `revoke-with-key`, - Fields: map[string]*framework.FieldSchema{ - "serial_number": { - Type: framework.TypeString, - Description: `Certificate serial number, in colon- or -hyphen-separated octal`, - }, - "certificate": { - Type: framework.TypeString, - Description: `Certificate to revoke in PEM format; must be -signed by an issuer in this mount.`, - }, - "private_key": { - Type: framework.TypeString, - Description: `Key to use to verify revocation permission; must -be in PEM format.`, - }, }, Operations: map[logical.Operation]framework.OperationHandler{ @@ -104,307 +56,19 @@ func pathRotateCRL(b *backend) *framework.Path { } } -func pathRotateDeltaCRL(b *backend) *framework.Path { - return &framework.Path{ - Pattern: `crl/rotate-delta`, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathRotateDeltaCRLRead, - // See backend.go; we will read a lot of data prior to calling write, - // so this request should be forwarded when it is first seen, not - // when it is ready to write. - ForwardPerformanceStandby: true, - }, - }, - - HelpSynopsis: pathRotateDeltaCRLHelpSyn, - HelpDescription: pathRotateDeltaCRLHelpDesc, - } -} - -func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, []byte, error) { - // This function handles just the verification of the certificate against - // the global issuer set, checking whether or not it is importable. - // - // We return the parsed serial number, an optionally-nil byte array to - // write out to disk, and an error if one occurred. - if b.useLegacyBundleCaStorage() { - // We require listing all issuers from the 1.11 method. 
If we're - // still using the legacy CA bundle but with the newer certificate - // attribute, we err and require the operator to upgrade and migrate - // prior to servicing new requests. - return "", false, nil, errutil.UserError{Err: "unable to process BYOC revocation until CA issuer migration has completed"} - } - - // First start by parsing the certificate. - if len(certPem) < 75 { - // See note in pathImportIssuers about this check. - return "", false, nil, errutil.UserError{Err: "provided certificate data was too short; perhaps a path was passed to the API rather than the contents of a PEM file"} - } - - pemBlock, _ := pem.Decode([]byte(certPem)) - if pemBlock == nil { - return "", false, nil, errutil.UserError{Err: "certificate contains no PEM data"} - } - - certReference, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return "", false, nil, errutil.UserError{Err: fmt.Sprintf("certificate could not be parsed: %v", err)} - } - - // Ensure we have a well-formed serial number before continuing. - serial := serialFromCert(certReference) - if len(serial) == 0 { - return "", false, nil, errutil.UserError{Err: "invalid serial number on presented certificate"} - } - - // We have two approaches here: we could start verifying against issuers - // (which involves fetching and parsing them), or we could see if, by - // some chance we've already imported it (cheap). The latter tells us - // if we happen to have a serial number collision (which shouldn't - // happen in practice) versus an already-imported cert (which might - // happen and its fine to handle safely). - // - // Start with the latter since its cheaper. Fetch the cert (by serial) - // and if it exists, compare the contents. 
- certEntry, err := fetchCertBySerial(ctx, b, req, req.Path, serial) - if err != nil { - return serial, false, nil, err - } - - if certEntry != nil { - // As seen with importing issuers, it is best to parse the certificate - // and compare parsed values, rather than attempting to infer equality - // from the raw data. - certReferenceStored, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return serial, false, nil, err - } - - if !areCertificatesEqual(certReference, certReferenceStored) { - // Here we refuse the import with an error because the two certs - // are unequal but we would've otherwise overwritten the existing - // copy. - return serial, false, nil, fmt.Errorf("certificate with same serial but unequal value already present in this cluster's storage; refusing to revoke") - } else { - // Otherwise, we can return without an error as we've already - // imported this certificate, likely when we issued it. We don't - // need to re-verify the signature as we assume it was already - // verified when it was imported. - return serial, false, certEntry.Value, nil - } - } - - // Otherwise, we must not have a stored copy. From here on out, the second - // parameter (except in error cases) should cause the cert to write out. - // - // Fetch and iterate through each issuer. - sc := b.makeStorageContext(ctx, req.Storage) - issuers, err := sc.listIssuers() - if err != nil { - return serial, false, nil, err - } - - foundMatchingIssuer := false - for _, issuerId := range issuers { - issuer, err := sc.fetchIssuerById(issuerId) - if err != nil { - return serial, false, nil, err - } - - issuerCert, err := issuer.GetCertificate() - if err != nil { - return serial, false, nil, err - } - - if err := certReference.CheckSignatureFrom(issuerCert); err == nil { - // If the signature was valid, we found our match and can safely - // exit. 
- foundMatchingIssuer = true - break - } - } - - if foundMatchingIssuer { - return serial, true, certReference.Raw, nil - } - - return serial, false, nil, errutil.UserError{Err: "unable to verify signature on presented cert from any present issuer in this mount; certificates from previous CAs will need to have their issuing CA and key re-imported if revocation is necessary"} -} - -func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Request, cert []byte, keyPem string) error { - if keyPem == "" { - // The only way to get here should be via the /revoke endpoint; - // validate the path one more time and return an error if necessary. - if req.Path != "revoke" { - return fmt.Errorf("must have private key to revoke via the /revoke-with-key path") - } - - // Otherwise, we don't need to validate the key and thus can return - // with success. - return nil - } - - // Parse the certificate for reference. - certReference, err := x509.ParseCertificate(cert) - if err != nil { - return errutil.UserError{Err: fmt.Sprintf("certificate could not be parsed: %v", err)} - } - - // Now parse the key's PEM block. - pemBlock, _ := pem.Decode([]byte(keyPem)) - if pemBlock == nil { - return errutil.UserError{Err: "provided key PEM block contained no data or failed to parse"} - } - - // Parse the inner DER key. - signer, _, err := certutil.ParseDERKey(pemBlock.Bytes) - if err != nil { - return fmt.Errorf("failed to parse provided private key: %v", err) - } - - // Finally, verify if the cert and key match. This code has been - // cribbed from the Go TLS config code, with minor modifications. - // - // In particular, we validate against the derived public key - // components and ensure we validate exponent and curve information - // as well. 
- // - // - // See: https://github.com/golang/go/blob/c6a2dada0df8c2d75cf3ae599d7caed77d416fa2/src/crypto/tls/tls.go#L304-L331 - switch certPub := certReference.PublicKey.(type) { - case *rsa.PublicKey: - privPub, ok := signer.Public().(*rsa.PublicKey) - if !ok { - return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} - } - if err := signer.(*rsa.PrivateKey).Validate(); err != nil { - return err - } - if certPub.N.Cmp(privPub.N) != 0 || certPub.E != privPub.E { - return errutil.UserError{Err: "provided private key does not match certificate's public key"} - } - case *ecdsa.PublicKey: - privPub, ok := signer.Public().(*ecdsa.PublicKey) - if !ok { - return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} - } - if certPub.X.Cmp(privPub.X) != 0 || certPub.Y.Cmp(privPub.Y) != 0 || certPub.Params().Name != privPub.Params().Name { - return errutil.UserError{Err: "provided private key does not match certificate's public key"} - } - case ed25519.PublicKey: - privPub, ok := signer.Public().(ed25519.PublicKey) - if !ok { - return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} - } - if subtle.ConstantTimeCompare(privPub, certPub) == 0 { - return errutil.UserError{Err: "provided private key does not match certificate's public key"} - } - default: - return errutil.UserError{Err: "certificate has an unknown public key algorithm; unable to validate provided private key; ask an admin to revoke this certificate instead"} - } - - return nil -} - func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *roleEntry) (*logical.Response, error) { - rawSerial, haveSerial := data.GetOk("serial_number") - rawCertificate, haveCert := data.GetOk("certificate") - - if !haveSerial && !haveCert { - return logical.ErrorResponse("The serial number or certificate to revoke must be provided."), nil - } else 
if haveSerial && haveCert { - return logical.ErrorResponse("Must provide either the certificate or the serial to revoke; not both."), nil - } - - var keyPem string - if req.Path == "revoke-with-key" { - rawKey, haveKey := data.GetOk("private_key") - if !haveKey { - return logical.ErrorResponse("Must have private key to revoke via the /revoke-with-key path."), nil - } - - keyPem = rawKey.(string) - if len(keyPem) < 64 { - // See note in pathImportKeyHandler... - return logical.ErrorResponse("Provided data for private_key was too short; perhaps a path was passed to the API rather than the contents of a PEM file?"), nil - } - } - - var serial string - if haveSerial { - // Easy case: this cert should be in storage already. - serial = rawSerial.(string) - if len(serial) == 0 { - return logical.ErrorResponse("The serial number must be provided"), nil - } - - // Here, fetch the certificate from disk to validate we can revoke it. - certEntry, err := fetchCertBySerial(ctx, b, req, req.Path, serial) - if err != nil { - switch err.(type) { - case errutil.UserError: - return logical.ErrorResponse(err.Error()), nil - default: - return nil, err - } - } - if certEntry == nil { - return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found or was already revoked", serial)), nil - } - - // Now, if the user provided a key, we'll have to make sure the key - // and stored certificate match. - if err := b.pathRevokeWriteHandleKey(ctx, req, certEntry.Value, keyPem); err != nil { - return nil, err - } - } else { - // Otherwise, we've gotta parse the certificate from the request and - // then import it into cluster-local storage. Before writing the - // certificate (and forwarding), we want to verify this certificate - // was actually signed by one of our present issuers. 
- var err error - var writeCert bool - var certBytes []byte - serial, writeCert, certBytes, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) - if err != nil { - return nil, err - } - - // Before we write the certificate, we've gotta verify the request in - // the event of a PoP-based revocation scheme; we don't want to litter - // storage with issued-but-not-revoked certificates. - if err := b.pathRevokeWriteHandleKey(ctx, req, certBytes, keyPem); err != nil { - return nil, err - } - - // At this point, a forward operation will occur if we're on a standby - // node as we're now attempting to write the bytes of the cert out to - // disk. - if writeCert { - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: "certs/" + serial, - Value: certBytes, - }) - if err != nil { - return nil, err - } - } - - // Finally, we have a valid serial number to use for BYOC revocation! + serial := data.Get("serial_number").(string) + if len(serial) == 0 { + return logical.ErrorResponse("The serial number must be provided"), nil } - // Assumption: this check is cheap. Call this twice, in the cert-import - // case, to allow cert verification to get rejected on the standby node, - // but we still need it to protect the serial number case. 
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { return nil, logical.ErrReadOnly } // We store and identify by lowercase colon-separated hex, but other // utilities use dashes and/or uppercase, so normalize - serial = strings.ReplaceAll(strings.ToLower(serial), "-", ":") + serial = strings.Replace(strings.ToLower(serial), "-", ":", -1) b.revokeStorageLock.Lock() defer b.revokeStorageLock.Unlock() @@ -433,49 +97,12 @@ func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ }, nil } -func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("error fetching CRL configuration: %v", err) - } - - isEnabled := cfg.EnableDelta - - crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) - if crlErr != nil { - switch crlErr.(type) { - case errutil.UserError: - return logical.ErrorResponse(fmt.Sprintf("Error during delta CRL building: %s", crlErr)), nil - default: - return nil, fmt.Errorf("error encountered during delta CRL building: %w", crlErr) - } - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "success": true, - }, - } - - if !isEnabled { - resp.AddWarning("requested rebuild of delta CRL when delta CRL is not enabled; this is a no-op") - } - - return resp, nil -} - const pathRevokeHelpSyn = ` -Revoke a certificate by serial number or with explicit certificate. - -When calling /revoke-with-key, the private key corresponding to the -certificate must be provided to authenticate the request. +Revoke a certificate by serial number. ` const pathRevokeHelpDesc = ` -This allows certificates to be revoke. A root token or corresponding -private key is required. +This allows certificates to be revoked using its serial number. A root token is required. 
` const pathRotateCRLHelpSyn = ` @@ -485,11 +112,3 @@ Force a rebuild of the CRL. const pathRotateCRLHelpDesc = ` Force a rebuild of the CRL. This can be used to remove expired certificates from it if no certificates have been revoked. A root token is required. ` - -const pathRotateDeltaCRLHelpSyn = ` -Force a rebuild of the delta CRL. -` - -const pathRotateDeltaCRLHelpDesc = ` -Force a rebuild of the delta CRL. This can be used to force an update of the otherwise periodically-rebuilt delta CRLs. -` diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 89a899a8c24dc..da446d65d107b 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -238,13 +237,6 @@ SHA-2-512. Defaults to 0 to automatically detect based on key length (SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, }, - "use_pss": { - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - }, - "key_usage": { Type: framework.TypeCommaStringSlice, Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, @@ -392,21 +384,6 @@ for "generate_lease".`, }, }, - "cn_validations": { - Type: framework.TypeCommaStringSlice, - Default: []string{"email", "hostname"}, - Description: `List of allowed validations to run against the -Common Name field. Values can include 'email' to validate the CN is a email -address, 'hostname' to validate the CN is a valid hostname (potentially -including wildcards). When multiple validations are specified, these take -OR semantics (either email OR hostname are allowed). 
The special value -'disabled' allows disabling all CN name validations, allowing for arbitrary -non-Hostname, non-Email address CNs.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Common Name Validations", - }, - }, - "policy_identifiers": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated string or list of policy OIDs, or a JSON list of qualified policy @@ -588,18 +565,6 @@ func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*ro modified = true } - // Update CN Validations to be the present default, "email,hostname" - if len(result.CNValidations) == 0 { - result.CNValidations = []string{"email", "hostname"} - modified = true - } - - // Ensure the role is valid after updating. - _, err = validateRole(b, &result, ctx, s) - if err != nil { - return nil, err - } - if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result) if err != nil { @@ -680,7 +645,6 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data KeyType: data.Get("key_type").(string), KeyBits: data.Get("key_bits").(int), SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), UseCSRCommonName: data.Get("use_csr_common_name").(bool), UseCSRSANs: data.Get("use_csr_sans").(bool), KeyUsage: data.Get("key_usage").([]string), @@ -696,7 +660,6 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data GenerateLease: new(bool), NoStore: data.Get("no_store").(bool), RequireCN: data.Get("require_cn").(bool), - CNValidations: data.Get("cn_validations").([]string), AllowedSerialNumbers: data.Get("allowed_serial_numbers").([]string), PolicyIdentifiers: getPolicyIdentifier(data, nil), BasicConstraintsValidForNonCA: data.Get("basic_constraints_valid_for_non_ca").(bool), @@ -735,9 +698,6 @@ func (b *backend) pathRoleCreate(ctx context.Context, req 
*logical.Request, data } } else { *entry.GenerateLease = data.Get("generate_lease").(bool) - if *entry.GenerateLease { - warning = "it is encouraged to disable generate_lease and rely on PKI's native capabilities when possible; this option can cause Vault-wide issues with large numbers of issued certificates" - } } resp, err := validateRole(b, entry, ctx, req.Storage) @@ -805,8 +765,7 @@ func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.S } // Check that the issuers reference set resolves to something if !b.useLegacyBundleCaStorage() { - sc := b.makeStorageContext(ctx, s) - issuerId, err := sc.resolveIssuerReference(entry.Issuer) + issuerId, err := resolveIssuerReference(ctx, s, entry.Issuer) if err != nil { if issuerId == IssuerRefNotFound { resp = &logical.Response{} @@ -822,12 +781,6 @@ func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.S } - // Ensures CNValidations are alright - entry.CNValidations, err = checkCNValidations(entry.CNValidations) - if err != nil { - return nil, errutil.UserError{Err: err.Error()} - } - return resp, nil } @@ -880,7 +833,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data KeyType: getWithExplicitDefault(data, "key_type", oldEntry.KeyType).(string), KeyBits: getWithExplicitDefault(data, "key_bits", oldEntry.KeyBits).(int), SignatureBits: getWithExplicitDefault(data, "signature_bits", oldEntry.SignatureBits).(int), - UsePSS: getWithExplicitDefault(data, "use_pss", oldEntry.UsePSS).(bool), UseCSRCommonName: getWithExplicitDefault(data, "use_csr_common_name", oldEntry.UseCSRCommonName).(bool), UseCSRSANs: getWithExplicitDefault(data, "use_csr_sans", oldEntry.UseCSRSANs).(bool), KeyUsage: getWithExplicitDefault(data, "key_usage", oldEntry.KeyUsage).([]string), @@ -896,7 +848,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data GenerateLease: new(bool), NoStore: getWithExplicitDefault(data, "no_store", 
oldEntry.NoStore).(bool), RequireCN: getWithExplicitDefault(data, "require_cn", oldEntry.RequireCN).(bool), - CNValidations: getWithExplicitDefault(data, "cn_validations", oldEntry.CNValidations).([]string), AllowedSerialNumbers: getWithExplicitDefault(data, "allowed_serial_numbers", oldEntry.AllowedSerialNumbers).([]string), PolicyIdentifiers: getPolicyIdentifier(data, &oldEntry.PolicyIdentifiers), BasicConstraintsValidForNonCA: getWithExplicitDefault(data, "basic_constraints_valid_for_non_ca", oldEntry.BasicConstraintsValidForNonCA).(bool), @@ -933,7 +884,7 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data // no_store implies generate_lease := false if entry.NoStore { *entry.GenerateLease = false - if ok && generateLease.(bool) || !ok && *oldEntry.GenerateLease { + if ok && generateLease.(bool) || !ok && (*oldEntry.GenerateLease == true) { warning = "mutually exclusive values no_store=true and generate_lease=true were both specified; no_store=true takes priority" } } else { @@ -942,10 +893,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data } else { entry.GenerateLease = oldEntry.GenerateLease } - - if *entry.GenerateLease { - warning = "it is encouraged to disable generate_lease and rely on PKI's native capabilities when possible; this option can cause Vault-wide issues with large numbers of issued certificates" - } } resp, err := validateRole(b, entry, ctx, req.Storage) @@ -1053,61 +1000,59 @@ func parseExtKeyUsages(role *roleEntry) certutil.CertExtKeyUsage { type roleEntry struct { LeaseMax string `json:"lease_max"` Lease string `json:"lease"` - DeprecatedMaxTTL string `json:"max_ttl"` - DeprecatedTTL string `json:"ttl"` - TTL time.Duration `json:"ttl_duration"` - MaxTTL time.Duration `json:"max_ttl_duration"` - AllowLocalhost bool `json:"allow_localhost"` - AllowedBaseDomain string `json:"allowed_base_domain"` + DeprecatedMaxTTL string `json:"max_ttl" mapstructure:"max_ttl"` + DeprecatedTTL 
string `json:"ttl" mapstructure:"ttl"` + TTL time.Duration `json:"ttl_duration" mapstructure:"ttl_duration"` + MaxTTL time.Duration `json:"max_ttl_duration" mapstructure:"max_ttl_duration"` + AllowLocalhost bool `json:"allow_localhost" mapstructure:"allow_localhost"` + AllowedBaseDomain string `json:"allowed_base_domain" mapstructure:"allowed_base_domain"` AllowedDomainsOld string `json:"allowed_domains,omitempty"` - AllowedDomains []string `json:"allowed_domains_list"` + AllowedDomains []string `json:"allowed_domains_list" mapstructure:"allowed_domains"` AllowedDomainsTemplate bool `json:"allowed_domains_template"` AllowBaseDomain bool `json:"allow_base_domain"` - AllowBareDomains bool `json:"allow_bare_domains"` - AllowTokenDisplayName bool `json:"allow_token_displayname"` - AllowSubdomains bool `json:"allow_subdomains"` - AllowGlobDomains bool `json:"allow_glob_domains"` - AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` - AllowAnyName bool `json:"allow_any_name"` - EnforceHostnames bool `json:"enforce_hostnames"` - AllowIPSANs bool `json:"allow_ip_sans"` - ServerFlag bool `json:"server_flag"` - ClientFlag bool `json:"client_flag"` - CodeSigningFlag bool `json:"code_signing_flag"` - EmailProtectionFlag bool `json:"email_protection_flag"` - UseCSRCommonName bool `json:"use_csr_common_name"` - UseCSRSANs bool `json:"use_csr_sans"` - KeyType string `json:"key_type"` - KeyBits int `json:"key_bits"` - UsePSS bool `json:"use_pss"` - SignatureBits int `json:"signature_bits"` - MaxPathLength *int `json:",omitempty"` + AllowBareDomains bool `json:"allow_bare_domains" mapstructure:"allow_bare_domains"` + AllowTokenDisplayName bool `json:"allow_token_displayname" mapstructure:"allow_token_displayname"` + AllowSubdomains bool `json:"allow_subdomains" mapstructure:"allow_subdomains"` + AllowGlobDomains bool `json:"allow_glob_domains" mapstructure:"allow_glob_domains"` + AllowWildcardCertificates *bool 
`json:"allow_wildcard_certificates,omitempty" mapstructure:"allow_wildcard_certificates"` + AllowAnyName bool `json:"allow_any_name" mapstructure:"allow_any_name"` + EnforceHostnames bool `json:"enforce_hostnames" mapstructure:"enforce_hostnames"` + AllowIPSANs bool `json:"allow_ip_sans" mapstructure:"allow_ip_sans"` + ServerFlag bool `json:"server_flag" mapstructure:"server_flag"` + ClientFlag bool `json:"client_flag" mapstructure:"client_flag"` + CodeSigningFlag bool `json:"code_signing_flag" mapstructure:"code_signing_flag"` + EmailProtectionFlag bool `json:"email_protection_flag" mapstructure:"email_protection_flag"` + UseCSRCommonName bool `json:"use_csr_common_name" mapstructure:"use_csr_common_name"` + UseCSRSANs bool `json:"use_csr_sans" mapstructure:"use_csr_sans"` + KeyType string `json:"key_type" mapstructure:"key_type"` + KeyBits int `json:"key_bits" mapstructure:"key_bits"` + SignatureBits int `json:"signature_bits" mapstructure:"signature_bits"` + MaxPathLength *int `json:",omitempty" mapstructure:"max_path_length"` KeyUsageOld string `json:"key_usage,omitempty"` - KeyUsage []string `json:"key_usage_list"` - ExtKeyUsage []string `json:"extended_key_usage_list"` + KeyUsage []string `json:"key_usage_list" mapstructure:"key_usage"` + ExtKeyUsage []string `json:"extended_key_usage_list" mapstructure:"extended_key_usage"` OUOld string `json:"ou,omitempty"` - OU []string `json:"ou_list"` + OU []string `json:"ou_list" mapstructure:"ou"` OrganizationOld string `json:"organization,omitempty"` - Organization []string `json:"organization_list"` - Country []string `json:"country"` - Locality []string `json:"locality"` - Province []string `json:"province"` - StreetAddress []string `json:"street_address"` - PostalCode []string `json:"postal_code"` + Organization []string `json:"organization_list" mapstructure:"organization"` + Country []string `json:"country" mapstructure:"country"` + Locality []string `json:"locality" mapstructure:"locality"` + Province []string 
`json:"province" mapstructure:"province"` + StreetAddress []string `json:"street_address" mapstructure:"street_address"` + PostalCode []string `json:"postal_code" mapstructure:"postal_code"` GenerateLease *bool `json:"generate_lease,omitempty"` - NoStore bool `json:"no_store"` - RequireCN bool `json:"require_cn"` - CNValidations []string `json:"cn_validations"` - AllowedOtherSANs []string `json:"allowed_other_sans"` - AllowedSerialNumbers []string `json:"allowed_serial_numbers"` - AllowedURISANs []string `json:"allowed_uri_sans"` + NoStore bool `json:"no_store" mapstructure:"no_store"` + RequireCN bool `json:"require_cn" mapstructure:"require_cn"` + AllowedOtherSANs []string `json:"allowed_other_sans" mapstructure:"allowed_other_sans"` + AllowedSerialNumbers []string `json:"allowed_serial_numbers" mapstructure:"allowed_serial_numbers"` + AllowedURISANs []string `json:"allowed_uri_sans" mapstructure:"allowed_uri_sans"` AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` - PolicyIdentifiers []string `json:"policy_identifiers"` - ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` - BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` - NotBeforeDuration time.Duration `json:"not_before_duration"` - NotAfter string `json:"not_after"` - Issuer string `json:"issuer"` + PolicyIdentifiers []string `json:"policy_identifiers" mapstructure:"policy_identifiers"` + ExtKeyUsageOIDs []string `json:"ext_key_usage_oids" mapstructure:"ext_key_usage_oids"` + BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca" mapstructure:"basic_constraints_valid_for_non_ca"` + NotBeforeDuration time.Duration `json:"not_before_duration" mapstructure:"not_before_duration"` + NotAfter string `json:"not_after" mapstructure:"not_after"` + Issuer string `json:"issuer" mapstructure:"issuer"` } func (r *roleEntry) ToResponseData() map[string]interface{} { @@ -1135,7 +1080,6 @@ func (r *roleEntry) ToResponseData() map[string]interface{} { 
"key_type": r.KeyType, "key_bits": r.KeyBits, "signature_bits": r.SignatureBits, - "use_pss": r.UsePSS, "key_usage": r.KeyUsage, "ext_key_usage": r.ExtKeyUsage, "ext_key_usage_oids": r.ExtKeyUsageOIDs, @@ -1151,7 +1095,6 @@ func (r *roleEntry) ToResponseData() map[string]interface{} { "allowed_serial_numbers": r.AllowedSerialNumbers, "allowed_uri_sans": r.AllowedURISANs, "require_cn": r.RequireCN, - "cn_validations": r.CNValidations, "policy_identifiers": r.PolicyIdentifiers, "basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA, "not_before_duration": int64(r.NotBeforeDuration.Seconds()), @@ -1167,52 +1110,6 @@ func (r *roleEntry) ToResponseData() map[string]interface{} { return responseData } -func checkCNValidations(validations []string) ([]string, error) { - var haveDisabled bool - var haveEmail bool - var haveHostname bool - - var result []string - - if len(validations) == 0 { - return []string{"email", "hostname"}, nil - } - - for _, validation := range validations { - switch strings.ToLower(validation) { - case "disabled": - if haveDisabled { - return nil, fmt.Errorf("cn_validations value incorrect: `disabled` specified multiple times") - } - haveDisabled = true - case "email": - if haveEmail { - return nil, fmt.Errorf("cn_validations value incorrect: `email` specified multiple times") - } - haveEmail = true - case "hostname": - if haveHostname { - return nil, fmt.Errorf("cn_validations value incorrect: `hostname` specified multiple times") - } - haveHostname = true - default: - return nil, fmt.Errorf("cn_validations value incorrect: unknown type: `%s`", validation) - } - - result = append(result, strings.ToLower(validation)) - } - - if !haveDisabled && !haveEmail && !haveHostname { - return nil, fmt.Errorf("cn_validations value incorrect: must specify a value (`email` and/or `hostname`) or `disabled`") - } - - if haveDisabled && (haveEmail || haveHostname) { - return nil, fmt.Errorf("cn_validations value incorrect: cannot specify `disabled` 
along with `email` or `hostname`") - } - - return result, nil -} - const pathListRolesHelpSyn = `List the existing roles in this backend` const pathListRolesHelpDesc = `Roles will be listed by the role name.` diff --git a/builtin/logical/pki/path_roles_test.go b/builtin/logical/pki/path_roles_test.go index 34952d4772209..97c465b27c7b5 100644 --- a/builtin/logical/pki/path_roles_test.go +++ b/builtin/logical/pki/path_roles_test.go @@ -8,16 +8,17 @@ import ( "encoding/pem" "fmt" "testing" + "time" "github.com/go-errors/errors" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPki_RoleGenerateLease(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -58,21 +59,22 @@ func TestPki_RoleGenerateLease(t *testing.T) { t.Fatalf("generate_lease should not be set by default") } - // To test upgrade of generate_lease, we read the storage entry, - // modify it to remove generate_lease, and rewrite it. 
- entry, err := storage.Get(context.Background(), "role/testrole") - if err != nil || entry == nil { - t.Fatal(err) - } - + // Update values due to switching of ttl type + resp.Data["ttl_duration"] = resp.Data["ttl"] + resp.Data["ttl"] = (time.Duration(resp.Data["ttl"].(int64)) * time.Second).String() + resp.Data["max_ttl_duration"] = resp.Data["max_ttl"] + resp.Data["max_ttl"] = (time.Duration(resp.Data["max_ttl"].(int64)) * time.Second).String() + // role.GenerateLease will be nil after the decode var role roleEntry - if err := entry.DecodeJSON(&role); err != nil { + err = mapstructure.Decode(resp.Data, &role) + if err != nil { t.Fatal(err) } + // Make it explicit role.GenerateLease = nil - entry, err = logical.StorageEntryJSON("role/testrole", role) + entry, err := logical.StorageEntryJSON("role/testrole", role) if err != nil { t.Fatal(err) } @@ -122,7 +124,6 @@ func TestPki_RoleGenerateLease(t *testing.T) { } func TestPki_RoleKeyUsage(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -156,23 +157,26 @@ func TestPki_RoleKeyUsage(t *testing.T) { t.Fatalf("key_usage should have 2 values") } - // To test the upgrade of KeyUsageOld into KeyUsage, we read - // the storage entry, modify it to set KUO and unset KU, and - // rewrite it. 
- entry, err := storage.Get(context.Background(), "role/testrole") - if err != nil || entry == nil { - t.Fatal(err) - } - + // Update values due to switching of ttl type + resp.Data["ttl_duration"] = resp.Data["ttl"] + resp.Data["ttl"] = (time.Duration(resp.Data["ttl"].(int64)) * time.Second).String() + resp.Data["max_ttl_duration"] = resp.Data["max_ttl"] + resp.Data["max_ttl"] = (time.Duration(resp.Data["max_ttl"].(int64)) * time.Second).String() + // Check that old key usage value is nil var role roleEntry - if err := entry.DecodeJSON(&role); err != nil { + err = mapstructure.Decode(resp.Data, &role) + if err != nil { t.Fatal(err) } + if role.KeyUsageOld != "" { + t.Fatalf("old key usage storage value should be blank") + } + // Make it explicit role.KeyUsageOld = "KeyEncipherment,DigitalSignature" role.KeyUsage = nil - entry, err = logical.StorageEntryJSON("role/testrole", role) + entry, err := logical.StorageEntryJSON("role/testrole", role) if err != nil { t.Fatal(err) } @@ -213,7 +217,6 @@ func TestPki_RoleKeyUsage(t *testing.T) { } func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -252,23 +255,31 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { t.Fatalf("organization should have 2 values") } - // To test upgrade of O/OU, we read the storage entry, modify it to set - // the old O/OU value over the new one, and rewrite it. 
- entry, err := storage.Get(context.Background(), "role/testrole") - if err != nil || entry == nil { - t.Fatal(err) - } - + // Update values due to switching of ttl type + resp.Data["ttl_duration"] = resp.Data["ttl"] + resp.Data["ttl"] = (time.Duration(resp.Data["ttl"].(int64)) * time.Second).String() + resp.Data["max_ttl_duration"] = resp.Data["max_ttl"] + resp.Data["max_ttl"] = (time.Duration(resp.Data["max_ttl"].(int64)) * time.Second).String() + // Check that old key usage value is nil var role roleEntry - if err := entry.DecodeJSON(&role); err != nil { + err = mapstructure.Decode(resp.Data, &role) + if err != nil { t.Fatal(err) } + if role.OUOld != "" { + t.Fatalf("old ou storage value should be blank") + } + if role.OrganizationOld != "" { + t.Fatalf("old organization storage value should be blank") + } + + // Make it explicit role.OUOld = "abc,123" role.OU = nil role.OrganizationOld = "org1,org2" role.Organization = nil - entry, err = logical.StorageEntryJSON("role/testrole", role) + entry, err := logical.StorageEntryJSON("role/testrole", role) if err != nil { t.Fatal(err) } @@ -319,7 +330,6 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { } func TestPki_RoleAllowedDomains(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -352,21 +362,26 @@ func TestPki_RoleAllowedDomains(t *testing.T) { t.Fatalf("allowed_domains should have 2 values") } - // To test upgrade of allowed_domains, we read the storage entry, - // set the old one, and rewrite it. 
- entry, err := storage.Get(context.Background(), "role/testrole") - if err != nil || entry == nil { - t.Fatal(err) - } - + // Update values due to switching of ttl type + resp.Data["ttl_duration"] = resp.Data["ttl"] + resp.Data["ttl"] = (time.Duration(resp.Data["ttl"].(int64)) * time.Second).String() + resp.Data["max_ttl_duration"] = resp.Data["max_ttl"] + resp.Data["max_ttl"] = (time.Duration(resp.Data["max_ttl"].(int64)) * time.Second).String() + // Check that old key usage value is nil var role roleEntry - if err := entry.DecodeJSON(&role); err != nil { + err = mapstructure.Decode(resp.Data, &role) + if err != nil { t.Fatal(err) } + if role.AllowedDomainsOld != "" { + t.Fatalf("old allowed_domains storage value should be blank") + } + + // Make it explicit role.AllowedDomainsOld = "foobar.com,*example.com" role.AllowedDomains = nil - entry, err = logical.StorageEntryJSON("role/testrole", role) + entry, err := logical.StorageEntryJSON("role/testrole", role) if err != nil { t.Fatal(err) } @@ -407,7 +422,6 @@ func TestPki_RoleAllowedDomains(t *testing.T) { } func TestPki_RoleAllowedURISANs(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -442,7 +456,6 @@ func TestPki_RoleAllowedURISANs(t *testing.T) { } func TestPki_RolePkixFields(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -534,7 +547,6 @@ func TestPki_RolePkixFields(t *testing.T) { } func TestPki_RoleNoStore(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -655,7 +667,6 @@ func TestPki_RoleNoStore(t *testing.T) { } func TestPki_CertsLease(t *testing.T) { - t.Parallel() var resp *logical.Response var err error b, storage := createBackendWithStorage(t) @@ -737,7 +748,6 @@ func TestPki_CertsLease(t *testing.T) { } func TestPki_RolePatch(t *testing.T) { - t.Parallel() type TestCase struct { Field string Before 
interface{} @@ -1017,7 +1027,6 @@ func TestPki_RolePatch(t *testing.T) { } func TestPKI_RolePolicyInformation_Flat(t *testing.T) { - t.Parallel() type TestCase struct { Input interface{} ASN interface{} diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index b7640afe01e1c..28bc7d22aac7c 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -54,14 +54,13 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ b.issuersLock.Lock() defer b.issuersLock.Unlock() - sc := b.makeStorageContext(ctx, req.Storage) if !b.useLegacyBundleCaStorage() { - issuers, err := sc.listIssuers() + issuers, err := listIssuers(ctx, req.Storage) if err != nil { return nil, err } - keys, err := sc.listKeys() + keys, err := listKeys(ctx, req.Storage) if err != nil { return nil, err } @@ -69,12 +68,12 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ // Delete all issuers and keys. Ignore deleting the default since we're // explicitly deleting everything. 
for _, issuer := range issuers { - if _, err = sc.deleteIssuer(issuer); err != nil { + if _, err = deleteIssuer(ctx, req.Storage, issuer); err != nil { return nil, err } } for _, key := range keys { - if _, err = sc.deleteKey(key); err != nil { + if _, err = deleteKey(ctx, req.Storage, key); err != nil { return nil, err } } @@ -109,9 +108,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, return logical.ErrorResponse("Can not create root CA until migration has completed"), nil } - sc := b.makeStorageContext(ctx, req.Storage) - - exported, format, role, errorResp := getGenerationParams(sc, data) + exported, format, role, errorResp := b.getGenerationParams(ctx, req.Storage, data) if errorResp != nil { return errorResp, nil } @@ -122,7 +119,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, role.MaxPathLength = &maxPathLength } - issuerName, err := getIssuerName(sc, data) + issuerName, err := getIssuerName(ctx, req.Storage, data) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -130,13 +127,13 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, // only do it if its not in use. if strings.HasPrefix(req.Path, "root/rotate/") && len(issuerName) == 0 { // err is nil when the issuer name is in use. 
- _, err = sc.resolveIssuerReference("next") + _, err = resolveIssuerReference(ctx, req.Storage, "next") if err != nil { issuerName = "next" } } - keyName, err := getKeyName(sc, data) + keyName, err := getKeyName(ctx, req.Storage, data) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -146,7 +143,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, apiData: data, role: role, } - parsedBundle, warnings, err := generateCert(sc, input, nil, true, b.Backend.GetRandomReader()) + parsedBundle, err := generateCert(ctx, b, input, nil, true, b.Backend.GetRandomReader()) if err != nil { switch err.(type) { case errutil.UserError: @@ -184,8 +181,8 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, if len(parsedBundle.Certificate.OCSPServer) == 0 && len(parsedBundle.Certificate.IssuingCertificateURL) == 0 && len(parsedBundle.Certificate.CRLDistributionPoints) == 0 { // If the operator hasn't configured any of the URLs prior to // generating this issuer, we should add a warning to the response, - // informing them they might want to do so prior to issuing leaves. - resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information.") + // informing them they might want to do so and re-generate the issuer. + resp.AddWarning("This mount hasn't configured any authority access information fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. 
Consider updating /config/urls with this information.") } switch format { @@ -227,7 +224,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, } // Store it as the CA bundle - myIssuer, myKey, err := sc.writeCaBundle(cb, issuerName, keyName) + myIssuer, myKey, err := writeCaBundle(ctx, b, req.Storage, cb, issuerName, keyName) if err != nil { return nil, err } @@ -236,37 +233,15 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, resp.Data["key_id"] = myKey.ID resp.Data["key_name"] = myKey.Name - // Update the issuer to reflect the PSS status here for revocation; this - // allows CRL building to succeed if the root is using a managed key with - // only PSS support. - if input.role.KeyType == "rsa" && input.role.UsePSS { - // The one time that it is safe (and good) to copy the - // SignatureAlgorithm field off the certificate (for the purposes of - // detecting PSS support) is when we've freshly generated it AND it - // is a root (exactly this endpoint). - // - // For intermediates, this doesn't hold (not this endpoint) as that - // reflects the parent key's preferences. For imports, this doesn't - // hold as the old system might've allowed other signature types that - // the new system (whether Vault or a managed key) doesn't. 
- myIssuer.RevocationSigAlg = parsedBundle.Certificate.SignatureAlgorithm - if err := sc.writeIssuer(myIssuer); err != nil { - return nil, fmt.Errorf("unable to store PSS-updated issuer: %v", err) - } - } - // Also store it as just the certificate identified by serial number, so it // can be revoked - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, + Key: "certs/" + normalizeSerial(cb.SerialNumber), Value: parsedBundle.CertificateBytes, }) if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.incrementTotalCertificatesCount(certsCounted, key) // Build a fresh CRL err = b.crlBuilder.rebuild(ctx, b, req, true) @@ -278,8 +253,6 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, resp.AddWarning("Max path length of the generated certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") } - resp = addWarnings(resp, warnings) - return resp, nil } @@ -313,14 +286,11 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R AllowWildcardCertificates: new(bool), EnforceHostnames: false, KeyType: "any", - SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), AllowedOtherSANs: []string{"*"}, AllowedSerialNumbers: []string{"*"}, AllowedURISANs: []string{"*"}, NotAfter: data.Get("not_after").(string), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, - CNValidations: []string{"disabled"}, } *role.AllowWildcardCertificates = true @@ -329,8 +299,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R } var caErr error - sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := fetchCAInfo(ctx, b, req, issuerName, IssuanceUsage) if caErr != nil { switch caErr.(type) { 
case errutil.UserError: @@ -360,7 +329,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R apiData: data, role: role, } - parsedBundle, warnings, err := signCert(b, input, signingBundle, true, useCSRValues) + parsedBundle, err := signCert(b, input, signingBundle, true, useCSRValues) if err != nil { switch err.(type) { case errutil.UserError: @@ -412,8 +381,8 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R if len(parsedBundle.Certificate.OCSPServer) == 0 && len(parsedBundle.Certificate.IssuingCertificateURL) == 0 && len(parsedBundle.Certificate.CRLDistributionPoints) == 0 { // If the operator hasn't configured any of the URLs prior to // generating this issuer, we should add a warning to the response, - // informing them they might want to do so prior to issuing leaves. - resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information.") + // informing them they might want to do so and re-generate the issuer. + resp.AddWarning("This mount hasn't configured any authority access information fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls with this information.") } caChain := append([]string{cb.Certificate}, cb.CAChain...) 
@@ -444,23 +413,18 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R return nil, fmt.Errorf("unsupported format argument: %s", format) } - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, + Key: "certs/" + normalizeSerial(cb.SerialNumber), Value: parsedBundle.CertificateBytes, }) if err != nil { return nil, fmt.Errorf("unable to store certificate locally: %w", err) } - b.incrementTotalCertificatesCount(certsCounted, key) if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") } - resp = addWarnings(resp, warnings) - return resp, nil } @@ -494,8 +458,7 @@ func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Req } var caErr error - sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := fetchCAInfo(ctx, b, req, issuerName, IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: diff --git a/builtin/logical/pki/path_sign_issuers.go b/builtin/logical/pki/path_sign_issuers.go index cadbac5553f36..96b7666065bb4 100644 --- a/builtin/logical/pki/path_sign_issuers.go +++ b/builtin/logical/pki/path_sign_issuers.go @@ -7,15 +7,15 @@ import ( func pathIssuerSignIntermediate(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + return pathIssuerSignIntermediateRaw(b, pattern) } func pathSignIntermediate(b *backend) *framework.Path { pattern := "root/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + return pathIssuerSignIntermediateRaw(b, pattern) } -func buildPathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.Path { +func 
pathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.Path { fields := addIssuerRefField(map[string]*framework.FieldSchema{}) path := &framework.Path{ Pattern: pattern, @@ -55,43 +55,6 @@ the non-repudiation flag; into the issued certificate.`, } - fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - fields["skid"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "", - Description: `Value for the Subject Key Identifier field -(RFC 5280 Section 4.2.1.2). This value should ONLY be used when -cross-signing to mimic the existing certificate's SKID value; this -is necessary to allow certain TLS implementations (such as OpenSSL) -which use SKID/AKID matches in chain building to restrict possible -valid chains. - -Specified as a string in hex format. Default is empty, allowing -Vault to automatically calculate the SKID according to method one -in the above RFC section.`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: "", - }, - } - - fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. 
Defaults to false.`, - } - return path } diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go index 198a2f81154af..556e4c3488dd0 100644 --- a/builtin/logical/pki/path_tidy.go +++ b/builtin/logical/pki/path_tidy.go @@ -3,68 +3,58 @@ package pki import ( "context" "crypto/x509" - "errors" "fmt" "net/http" "sync/atomic" "time" "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) -var tidyCancelledError = errors.New("tidy operation cancelled") - -type tidyConfig struct { - Enabled bool `json:"enabled"` - Interval time.Duration `json:"interval_duration"` - CertStore bool `json:"tidy_cert_store"` - RevokedCerts bool `json:"tidy_revoked_certs"` - IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` - SafetyBuffer time.Duration `json:"safety_buffer"` - PauseDuration time.Duration `json:"pause_duration"` -} - -var defaultTidyConfig = tidyConfig{ - Enabled: false, - Interval: 12 * time.Hour, - CertStore: false, - RevokedCerts: false, - IssuerAssocs: false, - SafetyBuffer: 72 * time.Hour, - PauseDuration: 0 * time.Second, -} - func pathTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy$", - Fields: addTidyFields(map[string]*framework.FieldSchema{}), - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyWrite, - ForwardPerformanceStandby: true, + Fields: map[string]*framework.FieldSchema{ + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Set to true to enable tidying up +the certificate store`, + }, + + "tidy_revocation_list": { + Type: framework.TypeBool, + Description: `Deprecated; synonym for 'tidy_revoked_certs`, + }, + + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Set to true to expire all revoked +and expired certificates, removing them both 
from the CRL and from storage. The +CRL will be rotated if this causes any values to be removed.`, + }, + + "safety_buffer": { + Type: framework.TypeDurationSecond, + Description: `The amount of extra time that must have passed +beyond certificate expiration before it is removed +from the backend storage and/or revocation list. +Defaults to 72 hours.`, + Default: 259200, // 72h, but TypeDurationSecond currently requires defaults to be int }, }, - HelpSynopsis: pathTidyHelpSyn, - HelpDescription: pathTidyHelpDesc, - } -} -func pathTidyCancel(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "tidy-cancel$", Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyCancelWrite, + Callback: b.pathTidyWrite, ForwardPerformanceStandby: true, }, }, - HelpSynopsis: pathTidyCancelHelpSyn, - HelpDescription: pathTidyCancelHelpDesc, + + HelpSynopsis: pathTidyHelpSyn, + HelpDescription: pathTidyHelpDesc, } } @@ -82,73 +72,18 @@ func pathTidyStatus(b *backend) *framework.Path { } } -func pathConfigAutoTidy(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/auto-tidy", - Fields: addTidyFields(map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `Set to true to enable automatic tidy operations.`, - }, - "interval_duration": { - Type: framework.TypeDurationSecond, - Description: `Interval at which to run an auto-tidy operation. This is the time between tidy invocations (after one finishes to the start of the next). Running a manual tidy will reset this duration.`, - Default: int(defaultTidyConfig.Interval / time.Second), // TypeDurationSecond currently requires the default to be an int. 
- }, - }), - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigAutoTidyRead, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigAutoTidyWrite, - // Read more about why these flags are set in backend.go. - ForwardPerformanceStandby: true, - ForwardPerformanceSecondary: true, - }, - }, - HelpSynopsis: pathConfigAutoTidySyn, - HelpDescription: pathConfigAutoTidyDesc, - } -} - func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { safetyBuffer := d.Get("safety_buffer").(int) tidyCertStore := d.Get("tidy_cert_store").(bool) - tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool) || d.Get("tidy_revocation_list").(bool) - tidyRevokedAssocs := d.Get("tidy_revoked_cert_issuer_associations").(bool) - pauseDurationStr := d.Get("pause_duration").(string) - pauseDuration := 0 * time.Second + tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool) + tidyRevocationList := d.Get("tidy_revocation_list").(bool) if safetyBuffer < 1 { return logical.ErrorResponse("safety_buffer must be greater than zero"), nil } - if pauseDurationStr != "" { - var err error - pauseDuration, err = time.ParseDuration(pauseDurationStr) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error parsing pause_duration: %v", err)), nil - } - - if pauseDuration < (0 * time.Second) { - return logical.ErrorResponse("received invalid, negative pause_duration"), nil - } - } - bufferDuration := time.Duration(safetyBuffer) * time.Second - // Manual run with constructed configuration. 
- config := &tidyConfig{ - Enabled: true, - Interval: 0 * time.Second, - CertStore: tidyCertStore, - RevokedCerts: tidyRevokedCerts, - IssuerAssocs: tidyRevokedAssocs, - SafetyBuffer: bufferDuration, - PauseDuration: pauseDuration, - } - if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { resp := &logical.Response{} resp.AddWarning("Tidy operation already in progress.") @@ -161,52 +96,138 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr Storage: req.Storage, } - // Mark the last tidy operation as relatively recent, to ensure we don't - // try to trigger the periodic function. - b.tidyStatusLock.Lock() - b.lastTidy = time.Now() - b.tidyStatusLock.Unlock() - - // Kick off the actual tidy. - b.startTidyOperation(req, config) - - resp := &logical.Response{} - if !tidyCertStore && !tidyRevokedCerts && !tidyRevokedAssocs { - resp.AddWarning("No targets to tidy; specify tidy_cert_store=true or tidy_revoked_certs=true or tidy_revoked_cert_issuer_associations=true to start a tidy operation.") - } else { - resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") - } - - return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) -} - -func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { go func() { - atomic.StoreUint32(b.tidyCancelCAS, 0) defer atomic.StoreUint32(b.tidyCASGuard, 0) - b.tidyStatusStart(config) + b.tidyStatusStart(safetyBuffer, tidyCertStore, tidyRevokedCerts || tidyRevocationList) - // Don't cancel when the original client request goes away. 
- ctx := context.Background() + // Don't cancel when the original client request goes away + ctx = context.Background() logger := b.Logger().Named("tidy") doTidy := func() error { - if config.CertStore { - if err := b.doTidyCertStore(ctx, req, logger, config); err != nil { - return err + if tidyCertStore { + serials, err := req.Storage.List(ctx, "certs/") + if err != nil { + return fmt.Errorf("error fetching list of certs: %w", err) } - } - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError + serialCount := len(serials) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_total_entries"}, float32(serialCount)) + for i, serial := range serials { + b.tidyStatusMessage(fmt.Sprintf("Tidying certificate store: checking entry %d of %d", i, serialCount)) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_current_entry"}, float32(i)) + + certEntry, err := req.Storage.Get(ctx, "certs/"+serial) + if err != nil { + return fmt.Errorf("error fetching certificate %q: %w", serial, err) + } + + if certEntry == nil { + logger.Warn("certificate entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting nil entry with serial %s: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + continue + } + + if certEntry.Value == nil || len(certEntry.Value) == 0 { + logger.Warn("certificate entry has no value; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting entry with nil value with serial %s: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + continue + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return fmt.Errorf("unable to parse stored certificate with serial %q: 
%w", serial, err) + } + + if time.Now().After(cert.NotAfter.Add(bufferDuration)) { + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + } + } } - if config.RevokedCerts || config.IssuerAssocs { - if err := b.doTidyRevocationStore(ctx, req, logger, config); err != nil { - return err + if tidyRevokedCerts || tidyRevocationList { + b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + rebuildCRL := false + + revokedSerials, err := req.Storage.List(ctx, "revoked/") + if err != nil { + return fmt.Errorf("error fetching list of revoked certs: %w", err) + } + + revokedSerialsCount := len(revokedSerials) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_total_entries"}, float32(revokedSerialsCount)) + + var revInfo revocationInfo + for i, serial := range revokedSerials { + b.tidyStatusMessage(fmt.Sprintf("Tidying revoked certificates: checking certificate %d of %d", i, len(revokedSerials))) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_current_entry"}, float32(i)) + + revokedEntry, err := req.Storage.Get(ctx, "revoked/"+serial) + if err != nil { + return fmt.Errorf("unable to fetch revoked cert with serial %q: %w", serial, err) + } + + if revokedEntry == nil { + logger.Warn("revoked entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { + return fmt.Errorf("error deleting nil revoked entry with serial %s: %w", serial, err) + } + b.tidyStatusIncRevokedCertCount() + continue + } + + if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 { + logger.Warn("revoked entry has nil value; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { + return fmt.Errorf("error deleting 
revoked entry with nil value with serial %s: %w", serial, err) + } + b.tidyStatusIncRevokedCertCount() + continue + } + + err = revokedEntry.DecodeJSON(&revInfo) + if err != nil { + return fmt.Errorf("error decoding revocation entry for serial %q: %w", serial, err) + } + + revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes) + if err != nil { + return fmt.Errorf("unable to parse stored revoked certificate with serial %q: %w", serial, err) + } + + // Only remove the entries from revoked/ and certs/ if we're + // past its NotAfter value. This is because we use the + // information on revoked/ to build the CRL and the + // information on certs/ for lookup. + if time.Now().After(revokedCert.NotAfter.Add(bufferDuration)) { + if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { + return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err) + } + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting serial %q from store when tidying revoked: %w", serial, err) + } + rebuildCRL = true + b.tidyStatusIncRevokedCertCount() + } + } + + if rebuildCRL { + if err := b.crlBuilder.rebuild(ctx, b, req, false); err != nil { + return err + } } } @@ -218,254 +239,17 @@ func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { b.tidyStatusStop(err) } else { b.tidyStatusStop(nil) - - // Since the tidy operation finished without an error, we don't - // really want to start another tidy right away (if the interval - // is too short). So mark the last tidy as now. 
- b.tidyStatusLock.Lock() - b.lastTidy = time.Now() - b.tidyStatusLock.Unlock() } }() -} - -func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - serials, err := req.Storage.List(ctx, "certs/") - if err != nil { - return fmt.Errorf("error fetching list of certs: %w", err) - } - - serialCount := len(serials) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_total_entries"}, float32(serialCount)) - for i, serial := range serials { - b.tidyStatusMessage(fmt.Sprintf("Tidying certificate store: checking entry %d of %d", i, serialCount)) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_current_entry"}, float32(i)) - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - - // Check for pause duration to reduce resource consumption. - if config.PauseDuration > (0 * time.Second) { - time.Sleep(config.PauseDuration) - } - - certEntry, err := req.Storage.Get(ctx, "certs/"+serial) - if err != nil { - return fmt.Errorf("error fetching certificate %q: %w", serial, err) - } - - if certEntry == nil { - logger.Warn("certificate entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { - return fmt.Errorf("error deleting nil entry with serial %s: %w", serial, err) - } - b.tidyStatusIncCertStoreCount() - continue - } - - if certEntry.Value == nil || len(certEntry.Value) == 0 { - logger.Warn("certificate entry has no value; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { - return fmt.Errorf("error deleting entry with nil value with serial %s: %w", serial, err) - } - b.tidyStatusIncCertStoreCount() - continue - } - - cert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return 
fmt.Errorf("unable to parse stored certificate with serial %q: %w", serial, err) - } - - if time.Now().After(cert.NotAfter.Add(config.SafetyBuffer)) { - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { - return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) - } - b.tidyStatusIncCertStoreCount() - } - } - - b.tidyStatusLock.RLock() - metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_total_entries_remaining"}, float32(uint(serialCount)-b.tidyStatus.certStoreDeletedCount)) - b.tidyStatusLock.RUnlock() - - return nil -} - -func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() - - // Fetch and parse our issuers so we can associate them if necessary. - sc := b.makeStorageContext(ctx, req.Storage) - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) - if err != nil { - return err - } - - rebuildCRL := false - - revokedSerials, err := req.Storage.List(ctx, "revoked/") - if err != nil { - return fmt.Errorf("error fetching list of revoked certs: %w", err) - } - - revokedSerialsCount := len(revokedSerials) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_total_entries"}, float32(revokedSerialsCount)) - - fixedIssuers := 0 - - var revInfo revocationInfo - for i, serial := range revokedSerials { - b.tidyStatusMessage(fmt.Sprintf("Tidying revoked certificates: checking certificate %d of %d", i, len(revokedSerials))) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_current_entry"}, float32(i)) - - // Check for cancel before continuing. - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { - return tidyCancelledError - } - // Check for pause duration to reduce resource consumption. 
- if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() - time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() - } - - revokedEntry, err := req.Storage.Get(ctx, "revoked/"+serial) - if err != nil { - return fmt.Errorf("unable to fetch revoked cert with serial %q: %w", serial, err) - } - - if revokedEntry == nil { - logger.Warn("revoked entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { - return fmt.Errorf("error deleting nil revoked entry with serial %s: %w", serial, err) - } - b.tidyStatusIncRevokedCertCount() - continue - } - - if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 { - logger.Warn("revoked entry has nil value; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { - return fmt.Errorf("error deleting revoked entry with nil value with serial %s: %w", serial, err) - } - b.tidyStatusIncRevokedCertCount() - continue - } - - err = revokedEntry.DecodeJSON(&revInfo) - if err != nil { - return fmt.Errorf("error decoding revocation entry for serial %q: %w", serial, err) - } - - revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes) - if err != nil { - return fmt.Errorf("unable to parse stored revoked certificate with serial %q: %w", serial, err) - } - - // Tidy operations over revoked certs should execute prior to - // tidyRevokedCerts as that may remove the entry. If that happens, - // we won't persist the revInfo changes (as it was deleted instead). 
- var storeCert bool - if config.IssuerAssocs { - if !isRevInfoIssuerValid(&revInfo, issuerIDCertMap) { - b.tidyStatusIncMissingIssuerCertCount() - revInfo.CertificateIssuer = issuerID("") - storeCert = true - if associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) { - fixedIssuers += 1 - } - } - } - - if config.RevokedCerts { - // Only remove the entries from revoked/ and certs/ if we're - // past its NotAfter value. This is because we use the - // information on revoked/ to build the CRL and the - // information on certs/ for lookup. - if time.Now().After(revokedCert.NotAfter.Add(config.SafetyBuffer)) { - if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { - return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err) - } - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { - return fmt.Errorf("error deleting serial %q from store when tidying revoked: %w", serial, err) - } - rebuildCRL = true - storeCert = false - b.tidyStatusIncRevokedCertCount() - } - } - - // If the entry wasn't removed but was otherwise modified, - // go ahead and write it back out. 
- if storeCert { - revokedEntry, err = logical.StorageEntryJSON("revoked/"+serial, revInfo) - if err != nil { - return fmt.Errorf("error building entry to persist changes to serial %v from revoked list: %v", serial, err) - } - - err = req.Storage.Put(ctx, revokedEntry) - if err != nil { - return fmt.Errorf("error persisting changes to serial %v from revoked list: %v", serial, err) - } - } - } - - b.tidyStatusLock.RLock() - metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_total_entries_remaining"}, float32(uint(revokedSerialsCount)-b.tidyStatus.revokedCertDeletedCount)) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_entries_incorrect_issuers"}, float32(b.tidyStatus.missingIssuerCertCount)) - metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_entries_fixed_issuers"}, float32(fixedIssuers)) - b.tidyStatusLock.RUnlock() - - if rebuildCRL { - // Expired certificates isn't generally an important - // reason to trigger a CRL rebuild for. Check if - // automatic CRL rebuilds have been enabled and defer - // the rebuild if so. - config, err := sc.getRevocationConfig() - if err != nil { - return err - } - - if !config.AutoRebuild { - if err := b.crlBuilder.rebuild(ctx, b, req, false); err != nil { - return err - } - } - } - - return nil -} - -func (b *backend) pathTidyCancelWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - if b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && !b.System().LocalMount() { - return nil, logical.ErrReadOnly - } - - if atomic.LoadUint32(b.tidyCASGuard) == 0 { - resp := &logical.Response{} - resp.AddWarning("Tidy operation cannot be cancelled as none is currently running.") - return resp, nil - } - - // Grab the status lock before writing the cancel atomic. This lets us - // update the status correctly as well, avoiding writing it if we're not - // presently running. 
- // - // Unlock needs to occur prior to calling read. - b.tidyStatusLock.Lock() - if b.tidyStatus.state == tidyStatusStarted || atomic.LoadUint32(b.tidyCASGuard) == 1 { - if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 0, 1) { - b.tidyStatus.state = tidyStatusCancelling - } + resp := &logical.Response{} + if !tidyCertStore && !tidyRevokedCerts && !tidyRevocationList { + resp.AddWarning("No targets to tidy; specify tidy_cert_store=true or tidy_revoked_certs=true to start a tidy operation.") + } else { + resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") } - b.tidyStatusLock.Unlock() - return b.pathTidyStatusRead(ctx, req, d) + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) } func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *framework.FieldData) (*logical.Response, error) { @@ -480,21 +264,16 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp := &logical.Response{ Data: map[string]interface{}{ - "safety_buffer": nil, - "tidy_cert_store": nil, - "tidy_revoked_certs": nil, - "tidy_revoked_cert_issuer_associations": nil, - "pause_duration": nil, - "state": "Inactive", - "error": nil, - "time_started": nil, - "time_finished": nil, - "message": nil, - "cert_store_deleted_count": nil, - "revoked_cert_deleted_count": nil, - "missing_issuer_cert_count": nil, - "current_cert_store_count": nil, - "current_revoked_cert_count": nil, + "safety_buffer": nil, + "tidy_cert_store": nil, + "tidy_revoked_certs": nil, + "state": "Inactive", + "error": nil, + "time_started": nil, + "time_finished": nil, + "message": nil, + "cert_store_deleted_count": nil, + "revoked_cert_deleted_count": nil, }, } @@ -505,13 +284,10 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["safety_buffer"] = b.tidyStatus.safetyBuffer resp.Data["tidy_cert_store"] = b.tidyStatus.tidyCertStore 
resp.Data["tidy_revoked_certs"] = b.tidyStatus.tidyRevokedCerts - resp.Data["tidy_revoked_cert_issuer_associations"] = b.tidyStatus.tidyRevokedAssocs - resp.Data["pause_duration"] = b.tidyStatus.pauseDuration resp.Data["time_started"] = b.tidyStatus.timeStarted resp.Data["message"] = b.tidyStatus.message resp.Data["cert_store_deleted_count"] = b.tidyStatus.certStoreDeletedCount resp.Data["revoked_cert_deleted_count"] = b.tidyStatus.revokedCertDeletedCount - resp.Data["missing_issuer_cert_count"] = b.tidyStatus.missingIssuerCertCount switch b.tidyStatus.state { case tidyStatusStarted: @@ -525,113 +301,22 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["time_finished"] = b.tidyStatus.timeFinished resp.Data["error"] = b.tidyStatus.err.Error() // Don't clear the message so that it serves as a hint about when - // the error occurred. - case tidyStatusCancelling: - resp.Data["state"] = "Cancelling" - case tidyStatusCancelled: - resp.Data["state"] = "Cancelled" - resp.Data["time_finished"] = b.tidyStatus.timeFinished - } - - resp.Data["current_cert_store_count"] = b.certCount - resp.Data["current_revoked_cert_count"] = b.revokedCertCount - - if !b.certsCounted.Load() { - resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " + - "inaccurate") + // the error ocurred. 
} return resp, nil } -func (b *backend) pathConfigAutoTidyRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getAutoTidyConfig() - if err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - }, - }, nil -} - -func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getAutoTidyConfig() - if err != nil { - return nil, err - } - - if enabledRaw, ok := d.GetOk("enabled"); ok { - config.Enabled = enabledRaw.(bool) - } - - if intervalRaw, ok := d.GetOk("interval_duration"); ok { - config.Interval = time.Duration(intervalRaw.(int)) * time.Second - if config.Interval < 0 { - return logical.ErrorResponse(fmt.Sprintf("given interval_duration must be greater than or equal to zero seconds; got: %v", intervalRaw)), nil - } - } - - if certStoreRaw, ok := d.GetOk("tidy_cert_store"); ok { - config.CertStore = certStoreRaw.(bool) - } - - if revokedCertsRaw, ok := d.GetOk("tidy_revoked_certs"); ok { - config.RevokedCerts = revokedCertsRaw.(bool) - } - - if issuerAssocRaw, ok := d.GetOk("tidy_revoked_cert_issuer_associations"); ok { - config.IssuerAssocs = issuerAssocRaw.(bool) - } - - if safetyBufferRaw, ok := d.GetOk("safety_buffer"); ok { - config.SafetyBuffer = time.Duration(safetyBufferRaw.(int)) * time.Second - if config.SafetyBuffer < 1*time.Second { - return logical.ErrorResponse(fmt.Sprintf("given safety_buffer must be greater than 
zero seconds; got: %v", safetyBufferRaw)), nil - } - } - - if pauseDurationRaw, ok := d.GetOk("pause_duration"); ok { - config.PauseDuration, err = time.ParseDuration(pauseDurationRaw.(string)) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("unable to parse given pause_duration: %v", err)), nil - } - - if config.PauseDuration < (0 * time.Second) { - return logical.ErrorResponse("received invalid, negative pause_duration"), nil - } - } - - if config.Enabled && !(config.CertStore || config.RevokedCerts || config.IssuerAssocs) { - return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations)."), nil - } - - return nil, sc.writeAutoTidyConfig(config) -} - -func (b *backend) tidyStatusStart(config *tidyConfig) { +func (b *backend) tidyStatusStart(safetyBuffer int, tidyCertStore, tidyRevokedCerts bool) { b.tidyStatusLock.Lock() defer b.tidyStatusLock.Unlock() b.tidyStatus = &tidyStatus{ - safetyBuffer: int(config.SafetyBuffer / time.Second), - tidyCertStore: config.CertStore, - tidyRevokedCerts: config.RevokedCerts, - tidyRevokedAssocs: config.IssuerAssocs, - pauseDuration: config.PauseDuration.String(), - - state: tidyStatusStarted, - timeStarted: time.Now(), + safetyBuffer: safetyBuffer, + tidyCertStore: tidyCertStore, + tidyRevokedCerts: tidyRevokedCerts, + state: tidyStatusStarted, + timeStarted: time.Now(), } metrics.SetGauge([]string{"secrets", "pki", "tidy", "start_time_epoch"}, float32(b.tidyStatus.timeStarted.Unix())) @@ -645,8 +330,6 @@ func (b *backend) tidyStatusStop(err error) { b.tidyStatus.err = err if err == nil { b.tidyStatus.state = tidyStatusFinished - } else if err == tidyCancelledError { - b.tidyStatus.state = tidyStatusCancelled } else { b.tidyStatus.state = tidyStatusError } @@ -675,8 +358,6 @@ func (b *backend) tidyStatusIncCertStoreCount() { defer b.tidyStatusLock.Unlock() 
b.tidyStatus.certStoreDeletedCount++ - - b.decrementTotalCertificatesCountReport() } func (b *backend) tidyStatusIncRevokedCertCount() { @@ -684,15 +365,6 @@ func (b *backend) tidyStatusIncRevokedCertCount() { defer b.tidyStatusLock.Unlock() b.tidyStatus.revokedCertDeletedCount++ - - b.decrementTotalRevokedCertificatesCountReport() -} - -func (b *backend) tidyStatusIncMissingIssuerCertCount() { - b.tidyStatusLock.Lock() - defer b.tidyStatusLock.Unlock() - - b.tidyStatus.missingIssuerCertCount++ } const pathTidyHelpSyn = ` @@ -722,18 +394,6 @@ current time, minus the value of 'safety_buffer', is greater than the expiration, it will be removed. ` -const pathTidyCancelHelpSyn = ` -Cancels a currently running tidy operation. -` - -const pathTidyCancelHelpDesc = ` -This endpoint allows cancelling a currently running tidy operation. - -Periodically throughout the invocation of tidy, we'll check if the operation -has been requested to be cancelled. If so, we'll stop the currently running -tidy operation. -` - const pathTidyStatusHelpSyn = ` Returns the status of the tidy operation. 
` @@ -746,7 +406,6 @@ The result includes the following fields: * 'safety_buffer': the value of this parameter when initiating the tidy operation * 'tidy_cert_store': the value of this parameter when initiating the tidy operation * 'tidy_revoked_certs': the value of this parameter when initiating the tidy operation -* 'tidy_revoked_cert_issuer_associations': the value of this parameter when initiating the tidy operation * 'state': one of "Inactive", "Running", "Finished", "Error" * 'error': the error message, if the operation ran into an error * 'time_started': the time the operation started @@ -755,19 +414,4 @@ The result includes the following fields: "Tidying revoked certificates: checking certificate N of TOTAL" * 'cert_store_deleted_count': The number of certificate storage entries deleted * 'revoked_cert_deleted_count': The number of revoked certificate entries deleted -* 'missing_issuer_cert_count': The number of revoked certificates which were missing a valid issuer reference -` - -const pathConfigAutoTidySyn = ` -Modifies the current configuration for automatic tidy execution. -` - -const pathConfigAutoTidyDesc = ` -This endpoint accepts parameters to a tidy operation (see /tidy) that -will be used for automatic tidy execution. This takes two extra parameters, -enabled (to enable or disable auto-tidy) and interval_duration (which -controls the frequency of auto-tidy execution). - -Once enabled, a tidy operation will be kicked off automatically, as if it -were executed with the posted configuration. 
` diff --git a/builtin/logical/pki/path_tidy_test.go b/builtin/logical/pki/path_tidy_test.go deleted file mode 100644 index af0d1199ecaf8..0000000000000 --- a/builtin/logical/pki/path_tidy_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package pki - -import ( - "testing" - "time" - - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - - "github.com/stretchr/testify/require" -) - -func TestAutoTidy(t *testing.T) { - t.Parallel() - - // While we'd like to reduce this duration, we need to wait until - // the rollback manager timer ticks. With the new helper, we can - // modify the rollback manager timer period directly, allowing us - // to shorten the total test time significantly. - // - // We set the delta CRL time to ensure it executes prior to the - // main CRL rebuild, and the new CRL doesn't rebuild until after - // we're done. - newPeriod := 1 * time.Second - - // This test requires the periodicFunc to trigger, which requires we stand - // up a full test cluster. - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - // See notes below about usage of /sys/raw for reading cluster - // storage without barrier encryption. - EnableRaw: true, - } - cluster := vault.CreateTestClusterWithRollbackPeriod(t, newPeriod, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "10m", - MaxLeaseTTL: "60m", - }, - }) - require.NoError(t, err) - - // Generate root. 
- resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["issuer_id"]) - - // Run tidy so status is not empty when we run it later... - _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ - "tidy_revoked_certs": true, - }) - require.NoError(t, err) - - // Setup a testing role. - _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - - // Write the auto-tidy config. - _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": true, - "interval_duration": "1s", - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", - }) - require.NoError(t, err) - - // Issue a cert and revoke it. - resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ - "common_name": "example.com", - "ttl": "10s", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["serial_number"]) - require.NotEmpty(t, resp.Data["certificate"]) - leafSerial := resp.Data["serial_number"].(string) - leafCert := parseCert(t, resp.Data["certificate"].(string)) - - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": leafSerial, - }) - require.NoError(t, err) - - // Cert should still exist. - resp, err = client.Logical().Read("pki/cert/" + leafSerial) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - - // Wait for cert to expire and the safety buffer to elapse. - time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) - - // Wait for auto-tidy to run afterwards. 
- var foundTidyRunning string - var foundTidyFinished bool - timeoutChan := time.After(120 * time.Second) - for { - if foundTidyRunning != "" && foundTidyFinished { - break - } - - select { - case <-timeoutChan: - t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) - default: - time.Sleep(250 * time.Millisecond) - - resp, err = client.Logical().Read("pki/tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - require.NotEmpty(t, resp.Data["time_started"]) - state := resp.Data["state"].(string) - started := resp.Data["time_started"].(string) - t.Logf("Resp: %v", resp.Data) - - // We want the _next_ tidy run after the cert expires. This - // means if we're currently finished when we hit this the - // first time, we want to wait for the next run. - if foundTidyRunning == "" { - foundTidyRunning = started - } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { - foundTidyFinished = true - } - } - } - - // Cert should no longer exist. - resp, err = client.Logical().Read("pki/cert/" + leafSerial) - require.Nil(t, err) - require.Nil(t, resp) -} - -func TestTidyCancellation(t *testing.T) { - t.Parallel() - - numLeaves := 100 - - b, s := createBackendWithStorage(t) - - // Create a root, a role, and a bunch of leaves. 
- _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "ttl": "20m", - "key_type": "ec", - }) - require.NoError(t, err) - _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - for i := 0; i < numLeaves; i++ { - _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - "ttl": "1s", - }) - require.NoError(t, err) - } - - // Kick off a tidy operation (which runs in the background), but with - // a slow-ish pause between certificates. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ - "tidy_cert_store": true, - "safety_buffer": "1s", - "pause_duration": "1s", - }) - - // If we wait six seconds, the operation should still be running. That's - // how we check that pause_duration works. - time.Sleep(3 * time.Second) - - resp, err := CBRead(b, s, "tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["state"], "Running") - - // If we now cancel the operation, the response should say Cancelling. - cancelResp, err := CBWrite(b, s, "tidy-cancel", map[string]interface{}{}) - require.NoError(t, err) - require.NotNil(t, cancelResp) - require.NotNil(t, cancelResp.Data) - state := cancelResp.Data["state"].(string) - howMany := cancelResp.Data["cert_store_deleted_count"].(uint) - - if state == "Cancelled" { - // Rest of the test can't run; log and exit. - t.Log("Went to cancel the operation but response was already cancelled") - return - } - - require.Equal(t, state, "Cancelling") - - // Wait a little longer, and ensure we only processed at most 2 more certs - // after the cancellation respon. 
- time.Sleep(3 * time.Second) - - statusResp, err := CBRead(b, s, "tidy-status") - require.NoError(t, err) - require.NotNil(t, statusResp) - require.NotNil(t, statusResp.Data) - require.Equal(t, statusResp.Data["state"], "Cancelled") - nowMany := statusResp.Data["cert_store_deleted_count"].(uint) - if howMany+3 <= nowMany { - t.Fatalf("expected to only process at most 3 more certificates, but processed (%v >>> %v) certs", nowMany, howMany) - } -} diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go index dd4b4174ba501..2ccc63105c939 100644 --- a/builtin/logical/pki/storage.go +++ b/builtin/logical/pki/storage.go @@ -6,9 +6,7 @@ import ( "crypto" "crypto/x509" "fmt" - "sort" "strings" - "time" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -26,18 +24,12 @@ const ( legacyMigrationBundleLogKey = "config/legacyMigrationBundleLog" legacyCertBundlePath = "config/ca_bundle" legacyCRLPath = "crl" - deltaCRLPath = "delta-crl" - deltaCRLPathSuffix = "-delta" - - autoTidyConfigPath = "config/auto-tidy" // Used as a quick sanity check for a reference id lookups... 
uuidLength = 36 maxRolesToScanOnIssuerChange = 100 maxRolesToFindOnIssuerChange = 10 - - latestIssuerVersion = 1 ) type keyID string @@ -64,10 +56,10 @@ const ( ) type keyEntry struct { - ID keyID `json:"id"` - Name string `json:"name"` - PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"` - PrivateKey string `json:"private_key"` + ID keyID `json:"id" structs:"id" mapstructure:"id"` + Name string `json:"name" structs:"name" mapstructure:"name"` + PrivateKeyType certutil.PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` } func (e keyEntry) getManagedKeyUUID() (UUIDKey, error) { @@ -84,22 +76,20 @@ func (e keyEntry) isManagedPrivateKey() bool { type issuerUsage uint const ( - ReadOnlyUsage issuerUsage = iota - IssuanceUsage issuerUsage = 1 << iota - CRLSigningUsage issuerUsage = 1 << iota - OCSPSigningUsage issuerUsage = 1 << iota + ReadOnlyUsage issuerUsage = iota + IssuanceUsage issuerUsage = 1 << iota + CRLSigningUsage issuerUsage = 1 << iota // When adding a new usage in the future, we'll need to create a usage // mask field on the IssuerEntry and handle migrations to a newer mask, // inferring a value for the new bits. - AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage + AllIssuerUsages issuerUsage = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage ) var namedIssuerUsages = map[string]issuerUsage{ "read-only": ReadOnlyUsage, "issuing-certificates": IssuanceUsage, "crl-signing": CRLSigningUsage, - "ocsp-signing": OCSPSigningUsage, } func (i *issuerUsage) ToggleUsage(usages ...issuerUsage) { @@ -116,16 +106,7 @@ func (i issuerUsage) Names() string { var names []string var builtUsage issuerUsage - // Return the known set of usages in a sorted order to not have Terraform state files flipping - // saying values are different when it's the same list in a different order. 
- keys := make([]string, 0, len(namedIssuerUsages)) - for k := range namedIssuerUsages { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, name := range keys { - usage := namedIssuerUsages[name] + for name, usage := range namedIssuerUsages { if i.HasUsage(usage) { names = append(names, name) builtUsage.ToggleUsage(usage) @@ -155,57 +136,32 @@ func NewIssuerUsageFromNames(names []string) (issuerUsage, error) { } type issuerEntry struct { - ID issuerID `json:"id"` - Name string `json:"name"` - KeyID keyID `json:"key_id"` - Certificate string `json:"certificate"` - CAChain []string `json:"ca_chain"` - ManualChain []issuerID `json:"manual_chain"` - SerialNumber string `json:"serial_number"` - LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"` - Usage issuerUsage `json:"usage"` - RevocationSigAlg x509.SignatureAlgorithm `json:"revocation_signature_algorithm"` - Revoked bool `json:"revoked"` - RevocationTime int64 `json:"revocation_time"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - AIAURIs *certutil.URLEntries `json:"aia_uris,omitempty"` - LastModified time.Time `json:"last_modified"` - Version uint `json:"version"` + ID issuerID `json:"id" structs:"id" mapstructure:"id"` + Name string `json:"name" structs:"name" mapstructure:"name"` + KeyID keyID `json:"key_id" structs:"key_id" mapstructure:"key_id"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` + ManualChain []issuerID `json:"manual_chain" structs:"manual_chain" mapstructure:"manual_chain"` + SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` + LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior" structs:"not_after_behavior" mapstructure:"not_after_behavior"` + Usage issuerUsage `json:"usage" structs:"usage" mapstructure:"usage"` } type localCRLConfigEntry struct { - 
IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map"` - CRLNumberMap map[crlID]int64 `json:"crl_number_map"` - LastCompleteNumberMap map[crlID]int64 `json:"last_complete_number_map"` - CRLExpirationMap map[crlID]time.Time `json:"crl_expiration_map"` - LastModified time.Time `json:"last_modified"` - DeltaLastModified time.Time `json:"delta_last_modified"` + IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map" structs:"issuer_id_crl_map" mapstructure:"issuer_id_crl_map"` + CRLNumberMap map[crlID]int64 `json:"crl_number_map" structs:"crl_number_map" mapstructure:"crl_number_map"` } type keyConfigEntry struct { - DefaultKeyId keyID `json:"default"` + DefaultKeyId keyID `json:"default" structs:"default" mapstructure:"default"` } type issuerConfigEntry struct { - DefaultIssuerId issuerID `json:"default"` -} - -type storageContext struct { - Context context.Context - Storage logical.Storage - Backend *backend + DefaultIssuerId issuerID `json:"default" structs:"default" mapstructure:"default"` } -func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *storageContext { - return &storageContext{ - Context: ctx, - Storage: s, - Backend: b, - } -} - -func (sc *storageContext) listKeys() ([]keyID, error) { - strList, err := sc.Storage.List(sc.Context, keyPrefix) +func listKeys(ctx context.Context, s logical.Storage) ([]keyID, error) { + strList, err := s.List(ctx, keyPrefix) if err != nil { return nil, err } @@ -218,16 +174,17 @@ func (sc *storageContext) listKeys() ([]keyID, error) { return keyIds, nil } -func (sc *storageContext) fetchKeyById(keyId keyID) (*keyEntry, error) { +func fetchKeyById(ctx context.Context, s logical.Storage, keyId keyID) (*keyEntry, error) { if len(keyId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: empty key identifier")} } - entry, err := sc.Storage.Get(sc.Context, 
keyPrefix+keyId.String()) + entry, err := s.Get(ctx, keyPrefix+keyId.String()) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} } if entry == nil { + // FIXME: Dedicated/specific error for this? return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} } @@ -239,7 +196,7 @@ func (sc *storageContext) fetchKeyById(keyId keyID) (*keyEntry, error) { return &key, nil } -func (sc *storageContext) writeKey(key keyEntry) error { +func writeKey(ctx context.Context, s logical.Storage, key keyEntry) error { keyId := key.ID json, err := logical.StorageEntryJSON(keyPrefix+keyId.String(), key) @@ -247,11 +204,11 @@ func (sc *storageContext) writeKey(key keyEntry) error { return err } - return sc.Storage.Put(sc.Context, json) + return s.Put(ctx, json) } -func (sc *storageContext) deleteKey(id keyID) (bool, error) { - config, err := sc.getKeysConfig() +func deleteKey(ctx context.Context, s logical.Storage, id keyID) (bool, error) { + config, err := getKeysConfig(ctx, s) if err != nil { return false, err } @@ -260,15 +217,15 @@ func (sc *storageContext) deleteKey(id keyID) (bool, error) { if config.DefaultKeyId == id { wasDefault = true config.DefaultKeyId = keyID("") - if err := sc.setKeysConfig(config); err != nil { + if err := setKeysConfig(ctx, s, config); err != nil { return wasDefault, err } } - return wasDefault, sc.Storage.Delete(sc.Context, keyPrefix+id.String()) + return wasDefault, s.Delete(ctx, keyPrefix+id.String()) } -func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*keyEntry, bool, error) { +func importKey(ctx context.Context, b *backend, s logical.Storage, keyValue string, keyName string, keyType certutil.PrivateKeyType) (*keyEntry, bool, error) { // importKey imports the specified PEM-format key (from keyValue) into // the new PKI storage format. 
The first return field is a reference to // the new key; the second is whether or not the key already existed @@ -283,7 +240,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // Before we can import a known key, we first need to know if the key // exists in storage already. This means iterating through all known // keys and comparing their private value against this value. - knownKeys, err := sc.listKeys() + knownKeys, err := listKeys(ctx, s) if err != nil { return nil, false, err } @@ -295,7 +252,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer if err != nil { return nil, false, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key: %v", err)} } - pkForImportingKey, err = getManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID) + pkForImportingKey, err = getManagedKeyPublicKey(ctx, b, managedKeyUUID) if err != nil { return nil, false, err } @@ -308,11 +265,11 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer foundExistingKeyWithName := false for _, identifier := range knownKeys { - existingKey, err := sc.fetchKeyById(identifier) + existingKey, err := fetchKeyById(ctx, s, identifier) if err != nil { return nil, false, err } - areEqual, err := comparePublicKey(sc, existingKey, pkForImportingKey) + areEqual, err := comparePublicKey(ctx, b, existingKey, pkForImportingKey) if err != nil { return nil, false, err } @@ -343,7 +300,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer result.PrivateKeyType = keyType // Finally, we can write the key to storage. - if err := sc.writeKey(result); err != nil { + if err := writeKey(ctx, s, result); err != nil { return nil, false, err } @@ -351,19 +308,19 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // one of them has a missing KeyId link, and if so, point it back to // ourselves. 
We fetch the list of issuers up front, even when don't need // it, to give ourselves a better chance of succeeding below. - knownIssuers, err := sc.listIssuers() + knownIssuers, err := listIssuers(ctx, s) if err != nil { return nil, false, err } - issuerDefaultSet, err := sc.isDefaultIssuerSet() + issuerDefaultSet, err := isDefaultIssuerSet(ctx, s) if err != nil { return nil, false, err } // Now, for each issuer, try and compute the issuer<->key link if missing. for _, identifier := range knownIssuers { - existingIssuer, err := sc.fetchIssuerById(identifier) + existingIssuer, err := fetchIssuerById(ctx, s, identifier) if err != nil { return nil, false, err } @@ -391,14 +348,14 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // These public keys are equal, so this key entry must be the // corresponding private key to this issuer; update it accordingly. existingIssuer.KeyID = result.ID - if err := sc.writeIssuer(existingIssuer); err != nil { + if err := writeIssuer(ctx, s, existingIssuer); err != nil { return nil, false, err } // If there was no prior default value set and/or we had no known // issuers when we started, set this issuer as default. if !issuerDefaultSet { - err = sc.updateDefaultIssuerId(existingIssuer.ID) + err = updateDefaultIssuerId(ctx, s, existingIssuer.ID) if err != nil { return nil, false, err } @@ -409,12 +366,12 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // If there was no prior default value set and/or we had no known // keys when we started, set this key as default. 
- keyDefaultSet, err := sc.isDefaultKeySet() + keyDefaultSet, err := isDefaultKeySet(ctx, s) if err != nil { return nil, false, err } if len(knownKeys) == 0 || !keyDefaultSet { - if err = sc.updateDefaultKeyId(result.ID); err != nil { + if err = updateDefaultKeyId(ctx, s, result.ID); err != nil { return nil, false, err } } @@ -456,66 +413,8 @@ func (i issuerEntry) EnsureUsage(usage issuerUsage) error { return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef) } -func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { - // Hack: Go isn't kind enough expose its lovely signatureAlgorithmDetails - // informational struct for our usage. However, we don't want to actually - // fetch the private key and attempt a signature with this algo (as we'll - // mint new, previously unsigned material in the process that could maybe - // be potentially abused if it leaks). - // - // So... - // - // ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we - // exclude DSA support as the PKI engine has never supported DSA keys. - if algo == x509.UnknownSignatureAlgorithm { - // Special cased to indicate upgrade and letting Go automatically - // chose the correct value. 
- return nil - } - - cert, err := i.GetCertificate() - if err != nil { - return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %v", err) - } - - switch cert.PublicKeyAlgorithm { - case x509.RSA: - switch algo { - case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, - x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, - x509.SHA512WithRSAPSS: - return nil - } - case x509.ECDSA: - switch algo { - case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: - return nil - } - case x509.Ed25519: - switch algo { - case x509.PureEd25519: - return nil - } - } - - return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) -} - -func (i issuerEntry) GetAIAURLs(sc *storageContext) (urls *certutil.URLEntries, err error) { - // Default to the per-issuer AIA URLs. - urls = i.AIAURIs - - // If none are set (either due to a nil entry or because no URLs have - // been provided), fall back to the global AIA URL config. - if urls == nil || (len(urls.IssuingCertificates) == 0 && len(urls.CRLDistributionPoints) == 0 && len(urls.OCSPServers) == 0) { - urls, err = getGlobalAIAURLs(sc.Context, sc.Storage) - } - - return urls, err -} - -func (sc *storageContext) listIssuers() ([]issuerID, error) { - strList, err := sc.Storage.List(sc.Context, issuerPrefix) +func listIssuers(ctx context.Context, s logical.Storage) ([]issuerID, error) { + strList, err := s.List(ctx, issuerPrefix) if err != nil { return nil, err } @@ -528,10 +427,10 @@ func (sc *storageContext) listIssuers() ([]issuerID, error) { return issuerIds, nil } -func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { +func resolveKeyReference(ctx context.Context, s logical.Storage, reference string) (keyID, error) { if reference == defaultRef { // Handle fetching the default key. 
- config, err := sc.getKeysConfig() + config, err := getKeysConfig(ctx, s) if err != nil { return keyID("config-error"), err } @@ -544,7 +443,7 @@ func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, keyPrefix+reference) + entry, err := s.Get(ctx, keyPrefix+reference) if err != nil { return keyID("key-read"), err } @@ -554,12 +453,12 @@ func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { } // ... than to pull all keys from storage. - keys, err := sc.listKeys() + keys, err := listKeys(ctx, s) if err != nil { return keyID("list-error"), err } for _, keyId := range keys { - key, err := sc.fetchKeyById(keyId) + key, err := fetchKeyById(ctx, s, keyId) if err != nil { return keyID("key-read"), err } @@ -574,16 +473,17 @@ func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { } // fetchIssuerById returns an issuerEntry based on issuerId, if none found an error is returned. -func (sc *storageContext) fetchIssuerById(issuerId issuerID) (*issuerEntry, error) { +func fetchIssuerById(ctx context.Context, s logical.Storage, issuerId issuerID) (*issuerEntry, error) { if len(issuerId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"} + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: empty issuer identifier")} } - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+issuerId.String()) + entry, err := s.Get(ctx, issuerPrefix+issuerId.String()) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)} } if entry == nil { + // FIXME: Dedicated/specific error for this? 
return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())} } @@ -592,65 +492,22 @@ func (sc *storageContext) fetchIssuerById(issuerId issuerID) (*issuerEntry, erro return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)} } - return sc.upgradeIssuerIfRequired(&issuer), nil -} - -func (sc *storageContext) upgradeIssuerIfRequired(issuer *issuerEntry) *issuerEntry { - // *NOTE*: Don't attempt to write out the issuer here as it may cause ErrReadOnly that will direct the - // request all the way up to the primary cluster which would be horrible for local cluster operations such - // as generating a leaf cert or a revoke. - // Also even though we could tell if we are the primary cluster's active node, we can't tell if we have the - // a full rw issuer lock, so it might not be safe to write. - if issuer.Version == latestIssuerVersion { - return issuer - } - - if issuer.Version == 0 { - // Upgrade at this step requires interrogating the certificate itself; - // if this decode fails, it indicates internal problems and the - // request will subsequently fail elsewhere. However, decoding this - // certificate is mildly expensive, so we only do it in the event of - // a Version 0 certificate. - cert, err := issuer.GetCertificate() - if err != nil { - return issuer - } - - hadCRL := issuer.Usage.HasUsage(CRLSigningUsage) - // Remove CRL signing usage if it exists on the issuer but doesn't - // exist in the KU of the x509 certificate. - if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - - // Handle our new OCSPSigning usage flag for earlier versions. If we - // had it (prior to removing it in this upgrade), we'll add the OCSP - // flag since EKUs don't matter. 
- if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - } - - issuer.Version = latestIssuerVersion - return issuer + return &issuer, nil } -func (sc *storageContext) writeIssuer(issuer *issuerEntry) error { +func writeIssuer(ctx context.Context, s logical.Storage, issuer *issuerEntry) error { issuerId := issuer.ID - if issuer.LastModified.IsZero() { - issuer.LastModified = time.Now().UTC() - } json, err := logical.StorageEntryJSON(issuerPrefix+issuerId.String(), issuer) if err != nil { return err } - return sc.Storage.Put(sc.Context, json) + return s.Put(ctx, json) } -func (sc *storageContext) deleteIssuer(id issuerID) (bool, error) { - config, err := sc.getIssuersConfig() +func deleteIssuer(ctx context.Context, s logical.Storage, id issuerID) (bool, error) { + config, err := getIssuersConfig(ctx, s) if err != nil { return false, err } @@ -659,15 +516,15 @@ func (sc *storageContext) deleteIssuer(id issuerID) (bool, error) { if config.DefaultIssuerId == id { wasDefault = true config.DefaultIssuerId = issuerID("") - if err := sc.setIssuersConfig(config); err != nil { + if err := setIssuersConfig(ctx, s, config); err != nil { return wasDefault, err } } - return wasDefault, sc.Storage.Delete(sc.Context, issuerPrefix+id.String()) + return wasDefault, s.Delete(ctx, issuerPrefix+id.String()) } -func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuerEntry, bool, error) { +func importIssuer(ctx context.Context, b *backend, s logical.Storage, certValue string, issuerName string) (*issuerEntry, bool, error) { // importIssuers imports the specified PEM-format certificate (from // certValue) into the new PKI storage format. 
The first return field is a // reference to the new issuer; the second is whether or not the issuer @@ -702,23 +559,17 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is return nil, false, errutil.UserError{Err: "Refusing to import non-CA certificate"} } - // Ensure this certificate has a parsed public key. Otherwise, we've - // likely been given a bad certificate. - if issuerCert.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || issuerCert.PublicKey == nil { - return nil, false, errutil.UserError{Err: "Refusing to import CA certificate with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} - } - // Before we can import a known issuer, we first need to know if the issuer // exists in storage already. This means iterating through all known // issuers and comparing their private value against this value. - knownIssuers, err := sc.listIssuers() + knownIssuers, err := listIssuers(ctx, s) if err != nil { return nil, false, err } foundExistingIssuerWithName := false for _, identifier := range knownIssuers { - existingIssuer, err := sc.fetchIssuerById(identifier) + existingIssuer, err := fetchIssuerById(ctx, s, identifier) if err != nil { return nil, false, err } @@ -750,14 +601,7 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is result.Name = issuerName result.Certificate = certValue result.LeafNotAfterBehavior = certutil.ErrNotAfterBehavior - result.Usage.ToggleUsage(AllIssuerUsages) - result.Version = latestIssuerVersion - - // If we lack relevant bits for CRL, prohibit it from being set - // on the usage side. 
- if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(CRLSigningUsage) { - result.Usage.ToggleUsage(CRLSigningUsage) - } + result.Usage.ToggleUsage(IssuanceUsage, CRLSigningUsage) // We shouldn't add CSRs or multiple certificates in this countCertificates := strings.Count(result.Certificate, "-BEGIN ") @@ -765,13 +609,13 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is return nil, false, fmt.Errorf("bad issuer: potentially multiple PEM blobs in one certificate storage entry:\n%v", result.Certificate) } - result.SerialNumber = serialFromCert(issuerCert) + result.SerialNumber = strings.TrimSpace(certutil.GetHexFormatted(issuerCert.SerialNumber.Bytes(), ":")) // Before we return below, we need to iterate over _all_ keys and see if // one of them a public key matching this certificate, and if so, update our // link accordingly. We fetch the list of keys up front, even may not need // it, to give ourselves a better chance of succeeding below. - knownKeys, err := sc.listKeys() + knownKeys, err := listKeys(ctx, s) if err != nil { return nil, false, err } @@ -780,12 +624,12 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is // writing issuer to storage as we won't need to update the key, only // the issuer. for _, identifier := range knownKeys { - existingKey, err := sc.fetchKeyById(identifier) + existingKey, err := fetchKeyById(ctx, s, identifier) if err != nil { return nil, false, err } - equal, err := comparePublicKey(sc, existingKey, issuerCert.PublicKey) + equal, err := comparePublicKey(ctx, b, existingKey, issuerCert.PublicKey) if err != nil { return nil, false, err } @@ -802,18 +646,18 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is // Finally, rebuild the chains. In this process, because the provided // reference issuer is non-nil, we'll save this issuer to storage. 
- if err := sc.rebuildIssuersChains(&result); err != nil { + if err := rebuildIssuersChains(ctx, s, &result); err != nil { return nil, false, err } // If there was no prior default value set and/or we had no known // issuers when we started, set this issuer as default. - issuerDefaultSet, err := sc.isDefaultIssuerSet() + issuerDefaultSet, err := isDefaultIssuerSet(ctx, s) if err != nil { return nil, false, err } if (len(knownIssuers) == 0 || !issuerDefaultSet) && len(result.KeyID) != 0 { - if err = sc.updateDefaultIssuerId(result.ID); err != nil { + if err = updateDefaultIssuerId(ctx, s, result.ID); err != nil { return nil, false, err } } @@ -823,20 +667,20 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is } func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool { - return bytes.Equal(cert1.Raw, cert2.Raw) + return bytes.Compare(cert1.Raw, cert2.Raw) == 0 } -func (sc *storageContext) setLocalCRLConfig(mapping *localCRLConfigEntry) error { +func setLocalCRLConfig(ctx context.Context, s logical.Storage, mapping *localCRLConfigEntry) error { json, err := logical.StorageEntryJSON(storageLocalCRLConfig, mapping) if err != nil { return err } - return sc.Storage.Put(sc.Context, json) + return s.Put(ctx, json) } -func (sc *storageContext) getLocalCRLConfig() (*localCRLConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageLocalCRLConfig) +func getLocalCRLConfig(ctx context.Context, s logical.Storage) (*localCRLConfigEntry, error) { + entry, err := s.Get(ctx, storageLocalCRLConfig) if err != nil { return nil, err } @@ -856,42 +700,20 @@ func (sc *storageContext) getLocalCRLConfig() (*localCRLConfigEntry, error) { mapping.CRLNumberMap = make(map[crlID]int64) } - if len(mapping.LastCompleteNumberMap) == 0 { - mapping.LastCompleteNumberMap = make(map[crlID]int64) - - // Since this might not exist on migration, we want to guess as - // to the last full CRL number was. 
This was likely the last - // value from CRLNumberMap if it existed, since we're just adding - // the mapping here in this block. - // - // After the next full CRL build, we will have set this value - // correctly, so it doesn't really matter in the long term if - // we're off here. - for id, number := range mapping.CRLNumberMap { - // Decrement by one, since CRLNumberMap is the future number, - // not the last built number. - mapping.LastCompleteNumberMap[id] = number - 1 - } - } - - if len(mapping.CRLExpirationMap) == 0 { - mapping.CRLExpirationMap = make(map[crlID]time.Time) - } - return mapping, nil } -func (sc *storageContext) setKeysConfig(config *keyConfigEntry) error { +func setKeysConfig(ctx context.Context, s logical.Storage, config *keyConfigEntry) error { json, err := logical.StorageEntryJSON(storageKeyConfig, config) if err != nil { return err } - return sc.Storage.Put(sc.Context, json) + return s.Put(ctx, json) } -func (sc *storageContext) getKeysConfig() (*keyConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageKeyConfig) +func getKeysConfig(ctx context.Context, s logical.Storage) (*keyConfigEntry, error) { + entry, err := s.Get(ctx, storageKeyConfig) if err != nil { return nil, err } @@ -906,17 +728,17 @@ func (sc *storageContext) getKeysConfig() (*keyConfigEntry, error) { return keyConfig, nil } -func (sc *storageContext) setIssuersConfig(config *issuerConfigEntry) error { +func setIssuersConfig(ctx context.Context, s logical.Storage, config *issuerConfigEntry) error { json, err := logical.StorageEntryJSON(storageIssuerConfig, config) if err != nil { return err } - return sc.Storage.Put(sc.Context, json) + return s.Put(ctx, json) } -func (sc *storageContext) getIssuersConfig() (*issuerConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageIssuerConfig) +func getIssuersConfig(ctx context.Context, s logical.Storage) (*issuerConfigEntry, error) { + entry, err := s.Get(ctx, storageIssuerConfig) if err != nil { return 
nil, err } @@ -935,10 +757,10 @@ func (sc *storageContext) getIssuersConfig() (*issuerConfigEntry, error) { // returning the converted issuerID or an error if not found. This method will not properly resolve the // special legacyBundleShimID value as we do not want to confuse our special value and a user-provided name of the // same value. -func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, error) { +func resolveIssuerReference(ctx context.Context, s logical.Storage, reference string) (issuerID, error) { if reference == defaultRef { // Handle fetching the default issuer. - config, err := sc.getIssuersConfig() + config, err := getIssuersConfig(ctx, s) if err != nil { return issuerID("config-error"), err } @@ -951,7 +773,7 @@ func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, er // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+reference) + entry, err := s.Get(ctx, issuerPrefix+reference) if err != nil { return issuerID("issuer-read"), err } @@ -961,13 +783,13 @@ func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, er } // ... than to pull all issuers from storage. 
- issuers, err := sc.listIssuers() + issuers, err := listIssuers(ctx, s) if err != nil { return issuerID("list-error"), err } for _, issuerId := range issuers { - issuer, err := sc.fetchIssuerById(issuerId) + issuer, err := fetchIssuerById(ctx, s, issuerId) if err != nil { return issuerID("issuer-read"), err } @@ -981,17 +803,17 @@ func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, er return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)} } -func (sc *storageContext) resolveIssuerCRLPath(reference string) (string, error) { - if sc.Backend.useLegacyBundleCaStorage() { +func resolveIssuerCRLPath(ctx context.Context, b *backend, s logical.Storage, reference string) (string, error) { + if b.useLegacyBundleCaStorage() { return legacyCRLPath, nil } - issuer, err := sc.resolveIssuerReference(reference) + issuer, err := resolveIssuerReference(ctx, s, reference) if err != nil { return legacyCRLPath, err } - crlConfig, err := sc.getLocalCRLConfig() + crlConfig, err := getLocalCRLConfig(ctx, s) if err != nil { return legacyCRLPath, err } @@ -1006,11 +828,11 @@ func (sc *storageContext) resolveIssuerCRLPath(reference string) (string, error) // Builds a certutil.CertBundle from the specified issuer identifier, // optionally loading the key or not. This method supports loading legacy // bundles using the legacyBundleShimID issuerId, and if no entry is found will return an error. 
-func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) (*issuerEntry, *certutil.CertBundle, error) { +func fetchCertBundleByIssuerId(ctx context.Context, s logical.Storage, id issuerID, loadKey bool) (*issuerEntry, *certutil.CertBundle, error) { if id == legacyBundleShimID { // We have not completed the migration, or started a request in legacy mode, so // attempt to load the bundle from the legacy location - issuer, bundle, err := getLegacyCertBundle(sc.Context, sc.Storage) + issuer, bundle, err := getLegacyCertBundle(ctx, s) if err != nil { return nil, nil, err } @@ -1021,7 +843,7 @@ func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) ( return issuer, bundle, err } - issuer, err := sc.fetchIssuerById(id) + issuer, err := fetchIssuerById(ctx, s, id) if err != nil { return nil, nil, err } @@ -1033,7 +855,7 @@ func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) ( // Fetch the key if it exists. Sometimes we don't need the key immediately. 
if loadKey && issuer.KeyID != keyID("") { - key, err := sc.fetchKeyById(issuer.KeyID) + key, err := fetchKeyById(ctx, s, issuer.KeyID) if err != nil { return nil, nil, err } @@ -1045,8 +867,8 @@ func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) ( return issuer, &bundle, nil } -func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuerEntry, *keyEntry, error) { - myKey, _, err := sc.importKey(caBundle.PrivateKey, keyName, caBundle.PrivateKeyType) +func writeCaBundle(ctx context.Context, b *backend, s logical.Storage, caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuerEntry, *keyEntry, error) { + myKey, _, err := importKey(ctx, b, s, caBundle.PrivateKey, keyName, caBundle.PrivateKeyType) if err != nil { return nil, nil, err } @@ -1057,13 +879,13 @@ func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerNam return &issuerEntry{}, myKey, nil } - myIssuer, _, err := sc.importIssuer(caBundle.Certificate, issuerName) + myIssuer, _, err := importIssuer(ctx, b, s, caBundle.Certificate, issuerName) if err != nil { return nil, nil, err } for _, cert := range caBundle.CAChain { - if _, _, err = sc.importIssuer(cert, ""); err != nil { + if _, _, err = importIssuer(ctx, b, s, cert, ""); err != nil { return nil, nil, err } } @@ -1091,14 +913,14 @@ func genUuid() string { return aUuid } -func (sc *storageContext) isKeyInUse(keyId string) (inUse bool, issuerId string, err error) { - knownIssuers, err := sc.listIssuers() +func isKeyInUse(keyId string, ctx context.Context, s logical.Storage) (inUse bool, issuerId string, err error) { + knownIssuers, err := listIssuers(ctx, s) if err != nil { return true, "", err } for _, issuerId := range knownIssuers { - issuerEntry, err := sc.fetchIssuerById(issuerId) + issuerEntry, err := fetchIssuerById(ctx, s, issuerId) if err != nil { return true, issuerId.String(), errutil.InternalError{Err: fmt.Sprintf("unable to 
fetch pki issuer: %v", err)} } @@ -1113,8 +935,8 @@ func (sc *storageContext) isKeyInUse(keyId string) (inUse bool, issuerId string, return false, "", nil } -func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout bool, inUseBy int32, err error) { - roleEntries, err := sc.Storage.List(sc.Context, "role/") +func checkForRolesReferencing(issuerId string, ctx context.Context, storage logical.Storage) (timeout bool, inUseBy int32, err error) { + roleEntries, err := storage.List(ctx, "role/") if err != nil { return false, 0, err } @@ -1123,7 +945,7 @@ func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout boo checkedRoles := 0 for _, roleName := range roleEntries { - entry, err := sc.Storage.Get(sc.Context, "role/"+roleName) + entry, err := storage.Get(ctx, "role/"+roleName) if err != nil { return false, 0, err } @@ -1148,59 +970,3 @@ func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout boo return false, inUseBy, nil } - -func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { - entry, err := sc.Storage.Get(sc.Context, "config/crl") - if err != nil { - return nil, err - } - - var result crlConfig - if entry == nil { - result = defaultCrlConfig - return &result, nil - } - - if err = entry.DecodeJSON(&result); err != nil { - return nil, err - } - - if result.Version == 0 { - // Automatically update existing configurations. 
- result.OcspDisable = defaultCrlConfig.OcspDisable - result.OcspExpiry = defaultCrlConfig.OcspExpiry - result.AutoRebuild = defaultCrlConfig.AutoRebuild - result.AutoRebuildGracePeriod = defaultCrlConfig.AutoRebuildGracePeriod - result.Version = 1 - } - - return &result, nil -} - -func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) { - entry, err := sc.Storage.Get(sc.Context, autoTidyConfigPath) - if err != nil { - return nil, err - } - - var result tidyConfig - if entry == nil { - result = defaultTidyConfig - return &result, nil - } - - if err = entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (sc *storageContext) writeAutoTidyConfig(config *tidyConfig) error { - entry, err := logical.StorageEntryJSON(autoTidyConfigPath, config) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, entry) -} diff --git a/builtin/logical/pki/storage_migrations.go b/builtin/logical/pki/storage_migrations.go index 79ff697124a73..e431f95e33fe6 100644 --- a/builtin/logical/pki/storage_migrations.go +++ b/builtin/logical/pki/storage_migrations.go @@ -21,11 +21,11 @@ const ( ) type legacyBundleMigrationLog struct { - Hash string `json:"hash"` - Created time.Time `json:"created"` - CreatedIssuer issuerID `json:"issuer_id"` - CreatedKey keyID `json:"key_id"` - MigrationVersion int `json:"migrationVersion"` + Hash string `json:"hash" structs:"hash" mapstructure:"hash"` + Created time.Time `json:"created" structs:"created" mapstructure:"created"` + CreatedIssuer issuerID `json:"issuer_id" structs:"issuer_id" mapstructure:"issuer_id"` + CreatedKey keyID `json:"key_id" structs:"key_id" mapstructure:"key_id"` + MigrationVersion int `json:"migrationVersion" structs:"migrationVersion" mapstructure:"migrationVersion"` } type migrationInfo struct { @@ -89,8 +89,7 @@ func migrateStorage(ctx context.Context, b *backend, s logical.Storage) error { migrationName := fmt.Sprintf("current-%d", time.Now().Unix()) 
b.Logger().Info("performing PKI migration to new keys/issuers layout") - sc := b.makeStorageContext(ctx, s) - anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName) + anIssuer, aKey, err := writeCaBundle(ctx, b, s, migrationInfo.legacyBundle, migrationName, migrationName) if err != nil { return err } @@ -193,7 +192,7 @@ func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuerEntry, SerialNumber: cb.SerialNumber, LeafNotAfterBehavior: certutil.ErrNotAfterBehavior, } - issuer.Usage.ToggleUsage(AllIssuerUsages) + issuer.Usage.ToggleUsage(IssuanceUsage, CRLSigningUsage) return issuer, cb, nil } diff --git a/builtin/logical/pki/storage_migrations_test.go b/builtin/logical/pki/storage_migrations_test.go index 96da109bbf571..14e48d573310d 100644 --- a/builtin/logical/pki/storage_migrations_test.go +++ b/builtin/logical/pki/storage_migrations_test.go @@ -12,11 +12,9 @@ import ( ) func Test_migrateStorageEmptyStorage(t *testing.T) { - t.Parallel() startTime := time.Now() ctx := context.Background() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) @@ -26,11 +24,11 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { err := b.initialize(ctx, request) require.NoError(t, err) - issuerIds, err := sc.listIssuers() + issuerIds, err := listIssuers(ctx, s) require.NoError(t, err) require.Empty(t, issuerIds) - keyIds, err := sc.listKeys() + keyIds, err := listKeys(ctx, s) require.NoError(t, err) require.Empty(t, keyIds) @@ -65,7 +63,6 @@ func Test_migrateStorageOnlyKey(t *testing.T) { startTime := time.Now() ctx := context.Background() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) @@ -87,11 +84,11 @@ func Test_migrateStorageOnlyKey(t *testing.T) { err = b.initialize(ctx, request) require.NoError(t, err) - issuerIds, err := sc.listIssuers() + issuerIds, err := listIssuers(ctx, s) require.NoError(t, err) require.Equal(t, 0, len(issuerIds)) - keyIds, err := sc.listKeys() + keyIds, err := listKeys(ctx, s) require.NoError(t, err) require.Equal(t, 1, len(keyIds)) @@ -107,7 +104,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { require.Equal(t, logEntry.CreatedKey, keyIds[0]) keyId := keyIds[0] - key, err := sc.fetchKeyById(keyId) + key, err := fetchKeyById(ctx, s, keyId) require.NoError(t, err) require.True(t, strings.HasPrefix(key.Name, "current-"), "expected key name to start with current- was %s", key.Name) @@ -121,11 +118,11 @@ func Test_migrateStorageOnlyKey(t *testing.T) { require.Equal(t, bundle, certBundle) // Make sure we setup the default values - keysConfig, err := sc.getKeysConfig() + keysConfig, err := getKeysConfig(ctx, s) require.NoError(t, err) require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) - issuersConfig, err := sc.getIssuersConfig() + issuersConfig, err := getIssuersConfig(ctx, s) require.NoError(t, err) require.Equal(t, &issuerConfigEntry{}, issuersConfig) @@ -143,12 +140,9 @@ func Test_migrateStorageOnlyKey(t *testing.T) { } func Test_migrateStorageSimpleBundle(t *testing.T) { - t.Parallel() startTime := time.Now() ctx := context.Background() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) - // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") @@ -164,11 +158,11 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.NoError(t, err) require.NoError(t, err) - issuerIds, err := sc.listIssuers() + issuerIds, err := listIssuers(ctx, s) require.NoError(t, err) require.Equal(t, 1, len(issuerIds)) - keyIds, err := sc.listKeys() + keyIds, err := listKeys(ctx, s) require.NoError(t, err) require.Equal(t, 1, len(keyIds)) @@ -185,13 +179,13 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { issuerId := issuerIds[0] keyId := keyIds[0] - issuer, err := sc.fetchIssuerById(issuerId) + issuer, err := fetchIssuerById(ctx, s, issuerId) require.NoError(t, err) require.True(t, strings.HasPrefix(issuer.Name, "current-"), "expected issuer name to start with current- was %s", issuer.Name) require.Equal(t, certutil.ErrNotAfterBehavior, issuer.LeafNotAfterBehavior) - key, err := sc.fetchKeyById(keyId) + key, err := fetchKeyById(ctx, s, keyId) require.NoError(t, err) require.True(t, strings.HasPrefix(key.Name, "current-"), "expected key name to start with current- was %s", key.Name) @@ -215,11 +209,11 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.Equal(t, bundle, certBundle) // Make sure we setup the default values - keysConfig, err := sc.getKeysConfig() + keysConfig, err := getKeysConfig(ctx, s) require.NoError(t, err) require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) - issuersConfig, err := sc.getIssuersConfig() + issuersConfig, err := getIssuersConfig(ctx, s) require.NoError(t, err) require.Equal(t, &issuerConfigEntry{DefaultIssuerId: issuerId}, issuersConfig) @@ -251,7 +245,6 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { } func TestExpectedOpsWork_PreMigration(t *testing.T) { - t.Parallel() ctx := context.Background() b, s := createBackendWithStorage(t) // Reset the version the helper above set to 1. 
diff --git a/builtin/logical/pki/storage_test.go b/builtin/logical/pki/storage_test.go index 3ff2de6d57156..48337bb4e636b 100644 --- a/builtin/logical/pki/storage_test.go +++ b/builtin/logical/pki/storage_test.go @@ -14,16 +14,14 @@ import ( var ctx = context.Background() func Test_ConfigsRoundTrip(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) + _, s := createBackendWithStorage(t) // Verify we handle nothing stored properly - keyConfigEmpty, err := sc.getKeysConfig() + keyConfigEmpty, err := getKeysConfig(ctx, s) require.NoError(t, err) require.Equal(t, &keyConfigEntry{}, keyConfigEmpty) - issuerConfigEmpty, err := sc.getIssuersConfig() + issuerConfigEmpty, err := getIssuersConfig(ctx, s) require.NoError(t, err) require.Equal(t, &issuerConfigEntry{}, issuerConfigEmpty) @@ -35,70 +33,66 @@ func Test_ConfigsRoundTrip(t *testing.T) { DefaultIssuerId: genIssuerId(), } - err = sc.setKeysConfig(origKeyConfig) + err = setKeysConfig(ctx, s, origKeyConfig) require.NoError(t, err) - err = sc.setIssuersConfig(origIssuerConfig) + err = setIssuersConfig(ctx, s, origIssuerConfig) require.NoError(t, err) - keyConfig, err := sc.getKeysConfig() + keyConfig, err := getKeysConfig(ctx, s) require.NoError(t, err) require.Equal(t, origKeyConfig, keyConfig) - issuerConfig, err := sc.getIssuersConfig() + issuerConfig, err := getIssuersConfig(ctx, s) require.NoError(t, err) require.Equal(t, origIssuerConfig, issuerConfig) } func Test_IssuerRoundTrip(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) issuer1, key1 := genIssuerAndKey(t, b, s) issuer2, key2 := genIssuerAndKey(t, b, s) // We get an error when issuer id not found - _, err := sc.fetchIssuerById(issuer1.ID) + _, err := fetchIssuerById(ctx, s, issuer1.ID) require.Error(t, err) // We get an error when key id not found - _, err = sc.fetchKeyById(key1.ID) + _, err = fetchKeyById(ctx, s, key1.ID) require.Error(t, err) // 
Now write out our issuers and keys - err = sc.writeKey(key1) + err = writeKey(ctx, s, key1) require.NoError(t, err) - err = sc.writeIssuer(&issuer1) + err = writeIssuer(ctx, s, &issuer1) require.NoError(t, err) - err = sc.writeKey(key2) + err = writeKey(ctx, s, key2) require.NoError(t, err) - err = sc.writeIssuer(&issuer2) + err = writeIssuer(ctx, s, &issuer2) require.NoError(t, err) - fetchedKey1, err := sc.fetchKeyById(key1.ID) + fetchedKey1, err := fetchKeyById(ctx, s, key1.ID) require.NoError(t, err) - fetchedIssuer1, err := sc.fetchIssuerById(issuer1.ID) + fetchedIssuer1, err := fetchIssuerById(ctx, s, issuer1.ID) require.NoError(t, err) require.Equal(t, &key1, fetchedKey1) require.Equal(t, &issuer1, fetchedIssuer1) - keys, err := sc.listKeys() + keys, err := listKeys(ctx, s) require.NoError(t, err) require.ElementsMatch(t, []keyID{key1.ID, key2.ID}, keys) - issuers, err := sc.listIssuers() + issuers, err := listIssuers(ctx, s) require.NoError(t, err) require.ElementsMatch(t, []issuerID{issuer1.ID, issuer2.ID}, issuers) } func Test_KeysIssuerImport(t *testing.T) { - t.Parallel() b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) issuer1, key1 := genIssuerAndKey(t, b, s) issuer2, key2 := genIssuerAndKey(t, b, s) @@ -109,21 +103,21 @@ func Test_KeysIssuerImport(t *testing.T) { issuer1.ID = "" issuer1.KeyID = "" - key1Ref1, existing, err := sc.importKey(key1.PrivateKey, "key1", key1.PrivateKeyType) + key1Ref1, existing, err := importKey(ctx, b, s, key1.PrivateKey, "key1", key1.PrivateKeyType) require.NoError(t, err) require.False(t, existing) require.Equal(t, strings.TrimSpace(key1.PrivateKey), strings.TrimSpace(key1Ref1.PrivateKey)) // Make sure if we attempt to re-import the same private key, no import/updates occur. // So the existing flag should be set to true, and we do not update the existing Name field. 
- key1Ref2, existing, err := sc.importKey(key1.PrivateKey, "ignore-me", key1.PrivateKeyType) + key1Ref2, existing, err := importKey(ctx, b, s, key1.PrivateKey, "ignore-me", key1.PrivateKeyType) require.NoError(t, err) require.True(t, existing) require.Equal(t, key1.PrivateKey, key1Ref1.PrivateKey) require.Equal(t, key1Ref1.ID, key1Ref2.ID) require.Equal(t, key1Ref1.Name, key1Ref2.Name) - issuer1Ref1, existing, err := sc.importIssuer(issuer1.Certificate, "issuer1") + issuer1Ref1, existing, err := importIssuer(ctx, b, s, issuer1.Certificate, "issuer1") require.NoError(t, err) require.False(t, existing) require.Equal(t, strings.TrimSpace(issuer1.Certificate), strings.TrimSpace(issuer1Ref1.Certificate)) @@ -132,7 +126,7 @@ func Test_KeysIssuerImport(t *testing.T) { // Make sure if we attempt to re-import the same issuer, no import/updates occur. // So the existing flag should be set to true, and we do not update the existing Name field. - issuer1Ref2, existing, err := sc.importIssuer(issuer1.Certificate, "ignore-me") + issuer1Ref2, existing, err := importIssuer(ctx, b, s, issuer1.Certificate, "ignore-me") require.NoError(t, err) require.True(t, existing) require.Equal(t, strings.TrimSpace(issuer1.Certificate), strings.TrimSpace(issuer1Ref1.Certificate)) @@ -140,14 +134,14 @@ func Test_KeysIssuerImport(t *testing.T) { require.Equal(t, key1Ref1.ID, issuer1Ref2.KeyID) require.Equal(t, issuer1Ref1.Name, issuer1Ref2.Name) - err = sc.writeIssuer(&issuer2) + err = writeIssuer(ctx, s, &issuer2) require.NoError(t, err) - err = sc.writeKey(key2) + err = writeKey(ctx, s, key2) require.NoError(t, err) // Same double import tests as above, but make sure if the previous was created through writeIssuer not importIssuer. 
- issuer2Ref, existing, err := sc.importIssuer(issuer2.Certificate, "ignore-me") + issuer2Ref, existing, err := importIssuer(ctx, b, s, issuer2.Certificate, "ignore-me") require.NoError(t, err) require.True(t, existing) require.Equal(t, strings.TrimSpace(issuer2.Certificate), strings.TrimSpace(issuer2Ref.Certificate)) @@ -156,7 +150,7 @@ func Test_KeysIssuerImport(t *testing.T) { require.Equal(t, issuer2.KeyID, issuer2Ref.KeyID) // Same double import tests as above, but make sure if the previous was created through writeKey not importKey. - key2Ref, existing, err := sc.importKey(key2.PrivateKey, "ignore-me", key2.PrivateKeyType) + key2Ref, existing, err := importKey(ctx, b, s, key2.PrivateKey, "ignore-me", key2.PrivateKeyType) require.NoError(t, err) require.True(t, existing) require.Equal(t, key2.PrivateKey, key2Ref.PrivateKey) @@ -164,41 +158,6 @@ func Test_KeysIssuerImport(t *testing.T) { require.Equal(t, "", key2Ref.Name) } -func Test_IssuerUpgrade(t *testing.T) { - t.Parallel() - b, s := createBackendWithStorage(t) - sc := b.makeStorageContext(ctx, s) - - // Make sure that we add OCSP signing to v0 issuers if CRLSigning is enabled - issuer, _ := genIssuerAndKey(t, b, s) - issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) - - err := sc.writeIssuer(&issuer) - require.NoError(t, err, "failed writing out issuer") - - newIssuer, err := sc.fetchIssuerById(issuer.ID) - require.NoError(t, err, "failed fetching issuer") - - require.Equal(t, uint(1), newIssuer.Version) - require.True(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) - - // If CRLSigning is not present on a v0, we should not have OCSP signing after upgrade. 
- issuer, _ = genIssuerAndKey(t, b, s) - issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) - issuer.Usage.ToggleUsage(CRLSigningUsage) - - err = sc.writeIssuer(&issuer) - require.NoError(t, err, "failed writing out issuer") - - newIssuer, err = sc.fetchIssuerById(issuer.ID) - require.NoError(t, err, "failed fetching issuer") - - require.Equal(t, uint(1), newIssuer.Version) - require.False(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) -} - func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, keyEntry) { certBundle := genCertBundle(t, b, s) @@ -218,8 +177,6 @@ func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, Certificate: strings.TrimSpace(certBundle.Certificate) + "\n", CAChain: certBundle.CAChain, SerialNumber: certBundle.SerialNumber, - Usage: AllIssuerUsages, - Version: latestIssuerVersion, } return pkiIssuer, pkiKey @@ -238,8 +195,7 @@ func genCertBundle(t *testing.T, b *backend, s logical.Storage) *certutil.CertBu "ttl": 3600, }, } - sc := b.makeStorageContext(ctx, s) - _, _, role, respErr := getGenerationParams(sc, apiData) + _, _, role, respErr := b.getGenerationParams(ctx, s, apiData) require.Nil(t, respErr) input := &inputBundle{ @@ -251,7 +207,7 @@ func genCertBundle(t *testing.T, b *backend, s logical.Storage) *certutil.CertBu apiData: apiData, role: role, } - parsedCertBundle, _, err := generateCert(sc, input, nil, true, b.GetRandomReader()) + parsedCertBundle, err := generateCert(ctx, b, input, nil, true, b.GetRandomReader()) require.NoError(t, err) certBundle, err := parsedCertBundle.ToCertBundle() diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go index 907bafe58d948..e91dce2bbd11b 100644 --- a/builtin/logical/pki/test_helpers.go +++ b/builtin/logical/pki/test_helpers.go @@ -14,7 +14,7 @@ import ( "encoding/pem" "fmt" "hash" - "io" + "io/ioutil" "strings" "testing" @@ -41,7 +41,8 @@ func createBackendWithStorage(t testing.TB) (*backend, 
logical.Storage) { } func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { - err := client.Sys().Mount(path, &api.MountInput{ + var err error + err = client.Sys().Mount(path, &api.MountInput{ Type: "pki", Config: api.MountConfigInput{ DefaultLeaseTTL: "16h", @@ -53,13 +54,13 @@ func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { // Signing helpers func requireSignedBy(t *testing.T, cert *x509.Certificate, key crypto.PublicKey) { - switch typedKey := key.(type) { + switch key.(type) { case *rsa.PublicKey: - requireRSASignedBy(t, cert, typedKey) + requireRSASignedBy(t, cert, key.(*rsa.PublicKey)) case *ecdsa.PublicKey: - requireECDSASignedBy(t, cert, typedKey) + requireECDSASignedBy(t, cert, key.(*ecdsa.PublicKey)) case ed25519.PublicKey: - requireED25519SignedBy(t, cert, typedKey) + requireED25519SignedBy(t, cert, key.(ed25519.PublicKey)) default: require.Fail(t, "unknown public key type %#v", key) } @@ -180,6 +181,42 @@ func getParsedCrl(t *testing.T, client *api.Client, mountPoint string) *pkix.Cer return getParsedCrlAtPath(t, client, path) } +func getParsedCrlForIssuer(t *testing.T, client *api.Client, mountPoint string, issuer string) *pkix.CertificateList { + path := fmt.Sprintf("/v1/%v/issuer/%v/crl/der", mountPoint, issuer) + crl := getParsedCrlAtPath(t, client, path) + + // Now fetch the issuer as well and verify the certificate + path = fmt.Sprintf("/v1/%v/issuer/%v/der", mountPoint, issuer) + req := client.NewRequest("GET", path) + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + certBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(certBytes) == 0 { + t.Fatalf("expected certificate in response body") + } + + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatal(err) + } + if cert == nil { + t.Fatalf("expected parsed certificate") + } + + if err := cert.CheckCRLSignature(crl); err != nil { + 
t.Fatalf("expected valid signature on CRL for issuer %v: %v", issuer, crl) + } + + return crl +} + func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.CertificateList { req := client.NewRequest("GET", path) resp, err := client.RawRequest(req) @@ -188,7 +225,7 @@ func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.Cer } defer resp.Body.Close() - crlBytes, err := io.ReadAll(resp.Body) + crlBytes, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("err: %s", err) } @@ -247,10 +284,6 @@ func CBWrite(b *backend, s logical.Storage, path string, data map[string]interfa return CBReq(b, s, logical.UpdateOperation, path, data) } -func CBPatch(b *backend, s logical.Storage, path string, data map[string]interface{}) (*logical.Response, error) { - return CBReq(b, s, logical.PatchOperation, path, data) -} - func CBList(b *backend, s logical.Storage, path string) (*logical.Response, error) { return CBReq(b, s, logical.ListOperation, path, make(map[string]interface{})) } @@ -258,27 +291,3 @@ func CBList(b *backend, s logical.Storage, path string) (*logical.Response, erro func CBDelete(b *backend, s logical.Storage, path string) (*logical.Response, error) { return CBReq(b, s, logical.DeleteOperation, path, make(map[string]interface{})) } - -func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { - var missingFields []string - for _, field := range fields { - value, ok := resp.Data[field] - if !ok || value == nil { - missingFields = append(missingFields, field) - } - } - - require.Empty(t, missingFields, "The following fields were required but missing from response:\n%v", resp.Data) -} - -func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - require.NoError(t, err, msgAndArgs...) - require.False(t, resp.IsError(), msgAndArgs...) - require.NotNil(t, resp, msgAndArgs...) 
-} - -func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - require.NoError(t, err, msgAndArgs...) - require.False(t, resp.IsError(), msgAndArgs...) - require.Nil(t, resp, msgAndArgs...) -} diff --git a/builtin/logical/pki/util.go b/builtin/logical/pki/util.go index 87e572fd939fd..f855e0d690651 100644 --- a/builtin/logical/pki/util.go +++ b/builtin/logical/pki/util.go @@ -1,30 +1,25 @@ package pki import ( + "context" "crypto" - "crypto/x509" "fmt" - "math/big" - "net/http" "regexp" "strings" - "time" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" - "github.com/hashicorp/vault/sdk/logical" ) const ( managedKeyNameArg = "managed_key_name" managedKeyIdArg = "managed_key_id" defaultRef = "default" - - // Constants for If-Modified-Since operation - headerIfModifiedSince = "If-Modified-Since" - headerLastModified = "Last-Modified" ) var ( @@ -33,20 +28,12 @@ var ( errKeyNameInUse = errutil.UserError{Err: "key name already in use"} ) -func serialFromCert(cert *x509.Certificate) string { - return serialFromBigInt(cert.SerialNumber) -} - -func serialFromBigInt(serial *big.Int) string { - return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) -} - func normalizeSerial(serial string) string { - return strings.ReplaceAll(strings.ToLower(serial), ":", "-") + return strings.Replace(strings.ToLower(serial), ":", "-", -1) } func denormalizeSerial(serial string) string { - return strings.ReplaceAll(strings.ToLower(serial), "-", ":") + return strings.Replace(strings.ToLower(serial), "-", ":", -1) } func kmsRequested(input *inputBundle) bool { @@ -117,7 +104,7 @@ func getKeyRefWithErr(data *framework.FieldData) (string, error) { keyRef := getKeyRef(data) if len(keyRef) == 0 { - return "", errutil.UserError{Err: "missing 
argument key_ref for existing type"} + return "", errutil.UserError{Err: fmt.Sprintf("missing argument key_ref for existing type")} } return keyRef, nil @@ -154,7 +141,7 @@ func getManagedKeyNameOrUUID(data *framework.FieldData) (name string, UUID strin return keyName, keyUUID, nil } -func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error) { +func getIssuerName(ctx context.Context, s logical.Storage, data *framework.FieldData) (string, error) { issuerName := "" issuerNameIface, ok := data.GetOk("issuer_name") if ok { @@ -167,7 +154,7 @@ func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error if !nameMatcher.MatchString(issuerName) { return issuerName, errutil.UserError{Err: "issuer name contained invalid characters"} } - issuerId, err := sc.resolveIssuerReference(issuerName) + issuerId, err := resolveIssuerReference(ctx, s, issuerName) if err == nil { return issuerName, errIssuerNameInUse } @@ -179,7 +166,7 @@ func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error return issuerName, nil } -func getKeyName(sc *storageContext, data *framework.FieldData) (string, error) { +func getKeyName(ctx context.Context, s logical.Storage, data *framework.FieldData) (string, error) { keyName := "" keyNameIface, ok := data.GetOk(keyNameParam) if ok { @@ -192,7 +179,7 @@ func getKeyName(sc *storageContext, data *framework.FieldData) (string, error) { if !nameMatcher.MatchString(keyName) { return "", errutil.UserError{Err: "key name contained invalid characters"} } - keyId, err := sc.resolveKeyReference(keyName) + keyId, err := resolveKeyReference(ctx, s, keyName) if err == nil { return "", errKeyNameInUse } @@ -219,141 +206,3 @@ func extractRef(data *framework.FieldData, paramName string) string { } return value } - -func isStringArrayDifferent(a, b []string) bool { - if len(a) != len(b) { - return true - } - - for i, v := range a { - if v != b[i] { - return true - } - } - - return false -} - -func 
hasHeader(header string, req *logical.Request) bool { - var hasHeader bool - headerValue := req.Headers[header] - if len(headerValue) > 0 { - hasHeader = true - } - - return hasHeader -} - -func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { - var headerTimeValue time.Time - headerValue := req.Headers[headerIfModifiedSince] - - headerTimeValue, err := time.Parse(time.RFC1123, headerValue[0]) - if err != nil { - return headerTimeValue, fmt.Errorf("failed to parse given value for '%s' header: %v", headerIfModifiedSince, err) - } - - return headerTimeValue, nil -} - -type ifModifiedReqType int - -const ( - ifModifiedUnknown ifModifiedReqType = iota - ifModifiedCA = iota - ifModifiedCRL = iota - ifModifiedDeltaCRL = iota -) - -type IfModifiedSinceHelper struct { - req *logical.Request - reqType ifModifiedReqType - issuerRef issuerID -} - -func sendNotModifiedResponseIfNecessary(helper *IfModifiedSinceHelper, sc *storageContext, resp *logical.Response) (bool, error) { - responseHeaders := map[string][]string{} - if !hasHeader(headerIfModifiedSince, helper.req) { - return false, nil - } - - before, err := sc.isIfModifiedSinceBeforeLastModified(helper, responseHeaders) - if err != nil { - return false, err - } - - if !before { - return false, nil - } - - // Fill response - resp.Data = map[string]interface{}{ - logical.HTTPContentType: "", - logical.HTTPStatusCode: 304, - } - resp.Headers = responseHeaders - - return true, nil -} - -func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModifiedSinceHelper, responseHeaders map[string][]string) (bool, error) { - // False return --> we were last modified _before_ the requester's - // time --> keep using the cached copy and return 304. 
- var err error - var lastModified time.Time - ifModifiedSince, err := parseIfNotModifiedSince(helper.req) - if err != nil { - return false, err - } - - switch helper.reqType { - case ifModifiedCRL, ifModifiedDeltaCRL: - if sc.Backend.crlBuilder.invalidate.Load() { - // When we see the CRL is invalidated, respond with false - // regardless of what the local CRL state says. We've likely - // renamed some issuers or are about to rebuild a new CRL.... - // - // We do this earlier, ahead of config load, as it saves us a - // potential error condition. - return false, nil - } - - crlConfig, err := sc.getLocalCRLConfig() - if err != nil { - return false, err - } - - lastModified = crlConfig.LastModified - if helper.reqType == ifModifiedDeltaCRL { - lastModified = crlConfig.DeltaLastModified - } - case ifModifiedCA: - issuerId, err := sc.resolveIssuerReference(string(helper.issuerRef)) - if err != nil { - return false, err - } - - issuer, err := sc.fetchIssuerById(issuerId) - if err != nil { - return false, err - } - - lastModified = issuer.LastModified - default: - return false, fmt.Errorf("unknown if-modified-since request type: %v", helper.reqType) - } - - if !lastModified.IsZero() && lastModified.Before(ifModifiedSince) { - responseHeaders[headerLastModified] = []string{lastModified.Format(http.TimeFormat)} - return true, nil - } - - return false, nil -} - -func addWarnings(resp *logical.Response, warnings []string) *logical.Response { - for _, warning := range warnings { - resp.AddWarning(warning) - } - return resp -} diff --git a/builtin/logical/postgresql/query.go b/builtin/logical/postgresql/query.go index e250a6fe3bda0..e4f7f59ddfccc 100644 --- a/builtin/logical/postgresql/query.go +++ b/builtin/logical/postgresql/query.go @@ -8,7 +8,7 @@ import ( // Query templates a query for us. 
func Query(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go index 89659796b5e5c..9e146750f2424 100644 --- a/builtin/logical/rabbitmq/backend_test.go +++ b/builtin/logical/rabbitmq/backend_test.go @@ -132,7 +132,7 @@ func TestBackend_roleCrud(t *testing.T) { func TestBackend_roleWithPasswordPolicy(t *testing.T) { if os.Getenv(logicaltest.TestEnvVar) == "" { - t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar)) return } diff --git a/builtin/logical/ssh/backend.go b/builtin/logical/ssh/backend.go index fe4f40b334c97..c7250d0361751 100644 --- a/builtin/logical/ssh/backend.go +++ b/builtin/logical/ssh/backend.go @@ -61,7 +61,6 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathVerify(&b), pathConfigCA(&b), pathSign(&b), - pathIssue(&b), pathFetchPublicKey(&b), }, diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 27934d42af17f..2664f6225c21a 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -322,31 +322,6 @@ func TestBackend_AllowedUsers(t *testing.T) { } } -func TestBackend_AllowedDomainsTemplate(t *testing.T) { - testAllowedDomainsTemplate := "{{ identity.entity.metadata.ssh_username }}.example.com" - expectedValidPrincipal := "foo." 
+ testUserName + ".example.com" - testAllowedPrincipalsTemplate( - t, testAllowedDomainsTemplate, - expectedValidPrincipal, - map[string]string{ - "ssh_username": testUserName, - }, - map[string]interface{}{ - "key_type": testCaKeyType, - "algorithm_signer": "rsa-sha2-256", - "allow_host_certificates": true, - "allow_subdomains": true, - "allowed_domains": testAllowedDomainsTemplate, - "allowed_domains_template": true, - }, - map[string]interface{}{ - "cert_type": "host", - "public_key": testCAPublicKey, - "valid_principals": expectedValidPrincipal, - }, - ) -} - func TestBackend_AllowedUsersTemplate(t *testing.T) { testAllowedUsersTemplate(t, "{{ identity.entity.metadata.ssh_username }}", @@ -365,147 +340,6 @@ func TestBackend_AllowedUsersTemplate_WithStaticPrefix(t *testing.T) { ) } -func TestBackend_DefaultUserTemplate(t *testing.T) { - testDefaultUserTemplate(t, - "{{ identity.entity.metadata.ssh_username }}", - testUserName, - map[string]string{ - "ssh_username": testUserName, - }, - ) -} - -func TestBackend_DefaultUserTemplate_WithStaticPrefix(t *testing.T) { - testDefaultUserTemplate(t, - "user-{{ identity.entity.metadata.ssh_username }}", - "user-"+testUserName, - map[string]string{ - "ssh_username": testUserName, - }, - ) -} - -func TestBackend_DefaultUserTemplateFalse_AllowedUsersTemplateTrue(t *testing.T) { - cluster, userpassToken := getSshCaTestCluster(t, testUserName) - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // set metadata "ssh_username" to userpass username - tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{ - "token": userpassToken, - }) - if err != nil { - t.Fatal(err) - } - entityID := tokenLookupResponse.Data["entity_id"].(string) - _, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{ - "metadata": map[string]string{ - "ssh_username": testUserName, - }, - }) - if err != nil { - t.Fatal(err) - } - - _, err = 
client.Logical().Write("ssh/roles/my-role", map[string]interface{}{ - "key_type": testCaKeyType, - "allow_user_certificates": true, - "default_user": "{{identity.entity.metadata.ssh_username}}", - // disable user templating but not allowed_user_template and the request should fail - "default_user_template": false, - "allowed_users": "{{identity.entity.metadata.ssh_username}}", - "allowed_users_template": true, - }) - if err != nil { - t.Fatal(err) - } - - // sign SSH key as userpass user - client.SetToken(userpassToken) - _, err = client.Logical().Write("ssh/sign/my-role", map[string]interface{}{ - "public_key": testCAPublicKey, - }) - if err == nil { - t.Errorf("signing request should fail when default_user is not in the allowed_users list, because allowed_users_template is true and default_user_template is not") - } - - expectedErrStr := "{{identity.entity.metadata.ssh_username}} is not a valid value for valid_principals" - if !strings.Contains(err.Error(), expectedErrStr) { - t.Errorf("expected error to include %q but it was: %q", expectedErrStr, err.Error()) - } -} - -func TestBackend_DefaultUserTemplateFalse_AllowedUsersTemplateFalse(t *testing.T) { - cluster, userpassToken := getSshCaTestCluster(t, testUserName) - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // set metadata "ssh_username" to userpass username - tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{ - "token": userpassToken, - }) - if err != nil { - t.Fatal(err) - } - entityID := tokenLookupResponse.Data["entity_id"].(string) - _, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{ - "metadata": map[string]string{ - "ssh_username": testUserName, - }, - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("ssh/roles/my-role", map[string]interface{}{ - "key_type": testCaKeyType, - "allow_user_certificates": true, - "default_user": "{{identity.entity.metadata.ssh_username}}", - 
"default_user_template": false, - "allowed_users": "{{identity.entity.metadata.ssh_username}}", - "allowed_users_template": false, - }) - if err != nil { - t.Fatal(err) - } - - // sign SSH key as userpass user - client.SetToken(userpassToken) - signResponse, err := client.Logical().Write("ssh/sign/my-role", map[string]interface{}{ - "public_key": testCAPublicKey, - }) - if err != nil { - t.Fatal(err) - } - - // check for the expected valid principals of certificate - signedKey := signResponse.Data["signed_key"].(string) - key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) - parsedKey, err := ssh.ParsePublicKey(key) - if err != nil { - t.Fatal(err) - } - actualPrincipals := parsedKey.(*ssh.Certificate).ValidPrincipals - if len(actualPrincipals) < 1 { - t.Fatal( - fmt.Sprintf("No ValidPrincipals returned: should have been %v", - []string{"{{identity.entity.metadata.ssh_username}}"}), - ) - } - if len(actualPrincipals) > 1 { - t.Error( - fmt.Sprintf("incorrect number ValidPrincipals, expected only 1: %v should be %v", - actualPrincipals, []string{"{{identity.entity.metadata.ssh_username}}"}), - ) - } - if actualPrincipals[0] != "{{identity.entity.metadata.ssh_username}}" { - t.Fatal( - fmt.Sprintf("incorrect ValidPrincipals: %v should be %v", - actualPrincipals, []string{"{{identity.entity.metadata.ssh_username}}"}), - ) - } -} - func newTestingFactory(t *testing.T) func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { return func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { defaultLeaseTTLVal := 2 * time.Minute @@ -1945,50 +1779,6 @@ func TestSSHBackend_ValidateNotBeforeDuration(t *testing.T) { logicaltest.Test(t, testCase) } -func TestSSHBackend_IssueSign(t *testing.T) { - config := logical.TestBackendConfig() - - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatalf("Cannot create backend: %s", err) - } - - testCase := logicaltest.TestCase{ - LogicalBackend: 
b, - Steps: []logicaltest.TestStep{ - configCaStep(testCAPublicKey, testCAPrivateKey), - - createRoleStep("testing", map[string]interface{}{ - "key_type": "otp", - "default_user": "user", - }), - // Key pair not issued with invalid role key type - issueSSHKeyPairStep("testing", "rsa", 0, true, "role key type 'otp' not allowed to issue key pairs"), - - createRoleStep("testing", map[string]interface{}{ - "key_type": "ca", - "allow_user_key_ids": false, - "allow_user_certificates": true, - "allowed_user_key_lengths": map[string]interface{}{ - "ssh-rsa": []int{2048, 3072, 4096}, - "ecdsa-sha2-nistp521": 0, - "ed25519": 0, - }, - }), - // Key_type not in allowed_user_key_types_lengths - issueSSHKeyPairStep("testing", "ec", 256, true, "provided key_type value not in allowed_user_key_types"), - // Key_bits not in allowed_user_key_types_lengths for provided key_type - issueSSHKeyPairStep("testing", "rsa", 2560, true, "provided key_bits value not in list of role's allowed_user_key_types"), - // key_type `rsa` and key_bits `2048` successfully created - issueSSHKeyPairStep("testing", "rsa", 2048, false, ""), - // key_type `ed22519` and key_bits `0` successfully created - issueSSHKeyPairStep("testing", "ed25519", 0, false, ""), - }, - } - - logicaltest.Test(t, testCase) -} - func getSshCaTestCluster(t *testing.T, userIdentity string) (*vault.TestCluster, string) { coreConfig := &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ @@ -2059,9 +1849,8 @@ func getSshCaTestCluster(t *testing.T, userIdentity string) (*vault.TestCluster, return cluster, userpassToken } -func testDefaultUserTemplate(t *testing.T, testDefaultUserTemplate string, - expectedValidPrincipal string, testEntityMetadata map[string]string, -) { +func testAllowedUsersTemplate(t *testing.T, testAllowedUsersTemplate string, + expectedValidPrincipal string, testEntityMetadata map[string]string) { cluster, userpassToken := getSshCaTestCluster(t, testUserName) defer cluster.Cleanup() client := 
cluster.Cores[0].Client @@ -2084,9 +1873,7 @@ func testDefaultUserTemplate(t *testing.T, testDefaultUserTemplate string, _, err = client.Logical().Write("ssh/roles/my-role", map[string]interface{}{ "key_type": testCaKeyType, "allow_user_certificates": true, - "default_user": testDefaultUserTemplate, - "default_user_template": true, - "allowed_users": testDefaultUserTemplate, + "allowed_users": testAllowedUsersTemplate, "allowed_users_template": true, }) if err != nil { @@ -2096,63 +1883,13 @@ func testDefaultUserTemplate(t *testing.T, testDefaultUserTemplate string, // sign SSH key as userpass user client.SetToken(userpassToken) signResponse, err := client.Logical().Write("ssh/sign/my-role", map[string]interface{}{ - "public_key": testCAPublicKey, - }) - if err != nil { - t.Fatal(err) - } - - // check for the expected valid principals of certificate - signedKey := signResponse.Data["signed_key"].(string) - key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) - parsedKey, err := ssh.ParsePublicKey(key) - if err != nil { - t.Fatal(err) - } - actualPrincipals := parsedKey.(*ssh.Certificate).ValidPrincipals - if actualPrincipals[0] != expectedValidPrincipal { - t.Fatal( - fmt.Sprintf("incorrect ValidPrincipals: %v should be %v", - actualPrincipals, []string{expectedValidPrincipal}), - ) - } -} - -func testAllowedPrincipalsTemplate(t *testing.T, testAllowedDomainsTemplate string, - expectedValidPrincipal string, testEntityMetadata map[string]string, - roleConfigPayload map[string]interface{}, signingPayload map[string]interface{}, -) { - cluster, userpassToken := getSshCaTestCluster(t, testUserName) - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // set metadata "ssh_username" to userpass username - tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{ - "token": userpassToken, - }) - if err != nil { - t.Fatal(err) - } - entityID := tokenLookupResponse.Data["entity_id"].(string) - _, err 
= client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{ - "metadata": testEntityMetadata, + "public_key": testCAPublicKey, + "valid_principals": expectedValidPrincipal, }) if err != nil { t.Fatal(err) } - _, err = client.Logical().Write("ssh/roles/my-role", roleConfigPayload) - if err != nil { - t.Fatal(err) - } - - // sign SSH key as userpass user - client.SetToken(userpassToken) - signResponse, err := client.Logical().Write("ssh/sign/my-role", signingPayload) - if err != nil { - t.Fatal(err) - } - // check for the expected valid principals of certificate signedKey := signResponse.Data["signed_key"].(string) key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) @@ -2169,25 +1906,6 @@ func testAllowedPrincipalsTemplate(t *testing.T, testAllowedDomainsTemplate stri } } -func testAllowedUsersTemplate(t *testing.T, testAllowedUsersTemplate string, - expectedValidPrincipal string, testEntityMetadata map[string]string, -) { - testAllowedPrincipalsTemplate( - t, testAllowedUsersTemplate, - expectedValidPrincipal, testEntityMetadata, - map[string]interface{}{ - "key_type": testCaKeyType, - "allow_user_certificates": true, - "allowed_users": testAllowedUsersTemplate, - "allowed_users_template": true, - }, - map[string]interface{}{ - "public_key": testCAPublicKey, - "valid_principals": expectedValidPrincipal, - }, - ) -} - func configCaStep(caPublicKey, caPrivateKey string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, @@ -2211,8 +1929,7 @@ func signCertificateStep( role, keyID string, certType int, validPrincipals []string, criticalOptionPermissions, extensionPermissions map[string]string, ttl time.Duration, - requestParameters map[string]interface{}, -) logicaltest.TestStep { + requestParameters map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "sign/" + role, @@ -2241,42 +1958,6 @@ func signCertificateStep( } } 
-func issueSSHKeyPairStep(role, keyType string, keyBits int, expectError bool, errorMsg string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "issue/" + role, - Data: map[string]interface{}{ - "key_type": keyType, - "key_bits": keyBits, - }, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if expectError { - var err error - if resp.Data["error"] != errorMsg { - err = fmt.Errorf("actual error message \"%s\" different from expected error message \"%s\"", resp.Data["error"], errorMsg) - } - - return err - } - - if resp.IsError() { - return fmt.Errorf("unexpected error response returned: %v", resp.Error()) - } - - if resp.Data["private_key_type"] != keyType { - return fmt.Errorf("response private_key_type (%s) does not match the provided key_type (%s)", resp.Data["private_key_type"], keyType) - } - - if resp.Data["signed_key"] == "" { - return errors.New("certificate/signed_key should not be empty") - } - - return nil - }, - } -} - func validateSSHCertificate(cert *ssh.Certificate, keyID string, certType int, validPrincipals []string, criticalOptionPermissions, extensionPermissions map[string]string, ttl time.Duration, ) error { diff --git a/builtin/logical/ssh/path_issue.go b/builtin/logical/ssh/path_issue.go deleted file mode 100644 index 77b644590fd04..0000000000000 --- a/builtin/logical/ssh/path_issue.go +++ /dev/null @@ -1,183 +0,0 @@ -package ssh - -import ( - "context" - "crypto/rand" - "errors" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -type keySpecs struct { - Type string - Bits int -} - -func pathIssue(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "issue/" + framework.GenericNameWithAtRegex("role"), - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathIssue, - }, - }, - Fields: map[string]*framework.FieldSchema{ - "role": { - 
Type: framework.TypeString, - Description: `The desired role with configuration for this request.`, - }, - "key_type": { - Type: framework.TypeString, - Description: "Specifies the desired key type; must be `rsa`, `ed25519` or `ec`", - Default: "rsa", - }, - "key_bits": { - Type: framework.TypeInt, - Description: "Specifies the number of bits to use for the generated keys.", - Default: 0, - }, - "ttl": { - Type: framework.TypeDurationSecond, - Description: `The requested Time To Live for the SSH certificate; -sets the expiration date. If not specified -the role default, backend default, or system -default TTL is used, in that order. Cannot -be later than the role max TTL.`, - }, - "valid_principals": { - Type: framework.TypeString, - Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`, - }, - "cert_type": { - Type: framework.TypeString, - Description: `Type of certificate to be created; either "user" or "host".`, - Default: "user", - }, - "key_id": { - Type: framework.TypeString, - Description: `Key id that the created certificate should have. 
If not specified, the display name of the token will be used.`, - }, - "critical_options": { - Type: framework.TypeMap, - Description: `Critical options that the certificate should be signed for.`, - }, - "extensions": { - Type: framework.TypeMap, - Description: `Extensions that the certificate should be signed for.`, - }, - }, - HelpSynopsis: pathIssueHelpSyn, - HelpDescription: pathIssueHelpDesc, - } -} - -func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // Get the role - roleName := data.Get("role").(string) - role, err := b.getRole(ctx, req.Storage, roleName) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", roleName)), nil - } - - if role.KeyType != "ca" { - return logical.ErrorResponse("role key type '%s' not allowed to issue key pairs", role.KeyType), nil - } - - // Validate and extract key specifications - keySpecs, err := extractKeySpecs(role, data) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - // Issue certificate - return b.pathIssueCertificate(ctx, req, data, role, keySpecs) -} - -func (b *backend) pathIssueCertificate(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole, keySpecs *keySpecs) (*logical.Response, error) { - publicKey, privateKey, err := generateSSHKeyPair(rand.Reader, keySpecs.Type, keySpecs.Bits) - if err != nil { - return nil, err - } - - // Sign key - userPublicKey, err := parsePublicSSHKey(publicKey) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil - } - - response, err := b.pathSignIssueCertificateHelper(ctx, req, data, role, userPublicKey) - if err != nil { - return nil, err - } - if response.IsError() { - return response, nil - } - - // Additional to sign response - response.Data["private_key"] = privateKey - response.Data["private_key_type"] = 
keySpecs.Type - - return response, nil -} - -func extractKeySpecs(role *sshRole, data *framework.FieldData) (*keySpecs, error) { - keyType := data.Get("key_type").(string) - keyBits := data.Get("key_bits").(int) - keySpecs := keySpecs{ - Type: keyType, - Bits: keyBits, - } - - keyTypeToMapKey := createKeyTypeToMapKey(keyType, keyBits) - - if len(role.AllowedUserKeyTypesLengths) != 0 { - var keyAllowed bool - var bitsAllowed bool - - keyTypeAliasesLoop: - for _, keyTypeAlias := range keyTypeToMapKey[keyType] { - allowedValues, allowed := role.AllowedUserKeyTypesLengths[keyTypeAlias] - if !allowed { - continue - } - keyAllowed = true - - for _, value := range allowedValues { - if value == keyBits { - bitsAllowed = true - break keyTypeAliasesLoop - } - } - } - - if !keyAllowed { - return nil, errors.New("provided key_type value not in allowed_user_key_types") - } - - if !bitsAllowed { - return nil, errors.New("provided key_bits value not in list of role's allowed_user_key_types") - } - } - - return &keySpecs, nil -} - -const pathIssueHelpSyn = ` -Request a certificate using a certain role with the provided details. -` - -const pathIssueHelpDesc = ` -This path allows requesting a certificate to be issued according to the -policy of the given role. The certificate will only be issued if the -requested details are allowed by the role policy. - -This path returns a certificate and a private key. If you want a workflow -that does not expose a private key, generate a CSR locally and use the -sign path instead. 
-` diff --git a/builtin/logical/ssh/path_issue_sign.go b/builtin/logical/ssh/path_issue_sign.go deleted file mode 100644 index e6f225ffdc6ff..0000000000000 --- a/builtin/logical/ssh/path_issue_sign.go +++ /dev/null @@ -1,554 +0,0 @@ -package ssh - -import ( - "context" - "crypto/dsa" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "errors" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/hashicorp/vault/sdk/logical" - "golang.org/x/crypto/ssh" -) - -var containsTemplateRegex = regexp.MustCompile(`{{.+?}}`) - -var ecCurveBitsToAlgoName = map[int]string{ - 256: ssh.KeyAlgoECDSA256, - 384: ssh.KeyAlgoECDSA384, - 521: ssh.KeyAlgoECDSA521, -} - -// If the algorithm is not found, it could be that we have a curve -// that we haven't added a constant for yet. But they could allow it -// (assuming x/crypto/ssh can parse it) via setting a ec: -// mapping rather than using a named SSH key type, so erring out here -// isn't advisable. 
- -type creationBundle struct { - KeyID string - ValidPrincipals []string - PublicKey ssh.PublicKey - CertificateType uint32 - TTL time.Duration - Signer ssh.Signer - Role *sshRole - CriticalOptions map[string]string - Extensions map[string]string -} - -func (b *backend) pathSignIssueCertificateHelper(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole, publicKey ssh.PublicKey) (*logical.Response, error) { - // Note that these various functions always return "user errors" so we pass - // them as 4xx values - keyID, err := b.calculateKeyID(data, req, role, publicKey) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - certificateType, err := b.calculateCertificateType(data, role) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - var parsedPrincipals []string - if certificateType == ssh.HostCert { - parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, "", role.AllowedDomains, role.AllowedDomainsTemplate, validateValidPrincipalForHosts(role)) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - } else { - defaultPrincipal := role.DefaultUser - if role.DefaultUserTemplate { - defaultPrincipal, err = b.renderPrincipal(role.DefaultUser, req) - if err != nil { - return nil, err - } - } - parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, defaultPrincipal, role.AllowedUsers, role.AllowedUsersTemplate, strutil.StrListContains) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - } - - ttl, err := b.calculateTTL(data, role) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - criticalOptions, err := b.calculateCriticalOptions(data, role) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - extensions, err := b.calculateExtensions(data, req, role) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - privateKeyEntry, err := caKey(ctx, req.Storage, caPrivateKey) - 
if err != nil { - return nil, fmt.Errorf("failed to read CA private key: %w", err) - } - if privateKeyEntry == nil || privateKeyEntry.Key == "" { - return nil, errors.New("failed to read CA private key") - } - - signer, err := ssh.ParsePrivateKey([]byte(privateKeyEntry.Key)) - if err != nil { - return nil, fmt.Errorf("failed to parse stored CA private key: %w", err) - } - - cBundle := creationBundle{ - KeyID: keyID, - PublicKey: publicKey, - Signer: signer, - ValidPrincipals: parsedPrincipals, - TTL: ttl, - CertificateType: certificateType, - Role: role, - CriticalOptions: criticalOptions, - Extensions: extensions, - } - - certificate, err := cBundle.sign() - if err != nil { - return nil, err - } - - signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate) - if len(signedSSHCertificate) == 0 { - return nil, errors.New("error marshaling signed certificate") - } - - response := &logical.Response{ - Data: map[string]interface{}{ - "serial_number": strconv.FormatUint(certificate.Serial, 16), - "signed_key": string(signedSSHCertificate), - }, - } - - return response, nil -} - -func (b *backend) renderPrincipal(principal string, req *logical.Request) (string, error) { - // Look for templating markers {{ .* }} - matched := containsTemplateRegex.MatchString(principal) - if matched { - if req.EntityID != "" { - // Retrieve principal based on template + entityID from request. 
- renderedPrincipal, err := framework.PopulateIdentityTemplate(principal, req.EntityID, b.System()) - if err != nil { - return "", fmt.Errorf("template '%s' could not be rendered -> %s", principal, err) - } - return renderedPrincipal, nil - } - } - // Static principal - return principal, nil -} - -func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logical.Request, role *sshRole, defaultPrincipal, principalsAllowedByRole string, enableTemplating bool, validatePrincipal func([]string, string) bool) ([]string, error) { - validPrincipals := "" - validPrincipalsRaw, ok := data.GetOk("valid_principals") - if ok { - validPrincipals = validPrincipalsRaw.(string) - } else { - validPrincipals = defaultPrincipal - } - - parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false) - // Build list of allowed Principals from template and static principalsAllowedByRole - var allowedPrincipals []string - for _, principal := range strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) { - if enableTemplating { - rendered, err := b.renderPrincipal(principal, req) - if err != nil { - return nil, err - } - // Template returned a principal - allowedPrincipals = append(allowedPrincipals, rendered) - } else { - // Static principal - allowedPrincipals = append(allowedPrincipals, principal) - } - } - - switch { - case len(parsedPrincipals) == 0: - // There is nothing to process - return nil, nil - case len(allowedPrincipals) == 0: - // User has requested principals to be set, but role is not configured - // with any principals - return nil, fmt.Errorf("role is not configured to allow any principals") - default: - // Role was explicitly configured to allow any principal. 
- if principalsAllowedByRole == "*" { - return parsedPrincipals, nil - } - - for _, principal := range parsedPrincipals { - if !validatePrincipal(strutil.RemoveDuplicates(allowedPrincipals, false), principal) { - return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal) - } - } - return parsedPrincipals, nil - } -} - -func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool { - return func(allowedPrincipals []string, validPrincipal string) bool { - for _, allowedPrincipal := range allowedPrincipals { - if allowedPrincipal == validPrincipal && role.AllowBareDomains { - return true - } - if role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowedPrincipal) { - return true - } - } - - return false - } -} - -func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) { - requestedCertificateType := data.Get("cert_type").(string) - - var certificateType uint32 - switch requestedCertificateType { - case "user": - if !role.AllowUserCertificates { - return 0, errors.New("cert_type 'user' is not allowed by role") - } - certificateType = ssh.UserCert - case "host": - if !role.AllowHostCertificates { - return 0, errors.New("cert_type 'host' is not allowed by role") - } - certificateType = ssh.HostCert - default: - return 0, errors.New("cert_type must be either 'user' or 'host'") - } - - return certificateType, nil -} - -func (b *backend) calculateKeyID(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) { - reqID := data.Get("key_id").(string) - - if reqID != "" { - if !role.AllowUserKeyIDs { - return "", fmt.Errorf("setting key_id is not allowed by role") - } - return reqID, nil - } - - keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}" - if req.DisplayName == "" { - keyIDFormat = "vault-{{public_key_hash}}" - } - - if role.KeyIDFormat != "" { - keyIDFormat = role.KeyIDFormat - } - - keyID := 
substQuery(keyIDFormat, map[string]string{ - "token_display_name": req.DisplayName, - "role_name": data.Get("role").(string), - "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())), - }) - - return keyID, nil -} - -func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) { - unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{}) - if len(unparsedCriticalOptions) == 0 { - return role.DefaultCriticalOptions, nil - } - - criticalOptions := convertMapToStringValue(unparsedCriticalOptions) - - if role.AllowedCriticalOptions != "" { - notAllowedOptions := []string{} - allowedCriticalOptions := strings.Split(role.AllowedCriticalOptions, ",") - - for option := range criticalOptions { - if !strutil.StrListContains(allowedCriticalOptions, option) { - notAllowedOptions = append(notAllowedOptions, option) - } - } - - if len(notAllowedOptions) != 0 { - return nil, fmt.Errorf("critical options not on allowed list: %v", notAllowedOptions) - } - } - - return criticalOptions, nil -} - -func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, error) { - unparsedExtensions := data.Get("extensions").(map[string]interface{}) - extensions := make(map[string]string) - - if len(unparsedExtensions) > 0 { - extensions := convertMapToStringValue(unparsedExtensions) - if role.AllowedExtensions == "*" { - // Allowed extensions was configured to allow all - return extensions, nil - } - - notAllowed := []string{} - allowedExtensions := strings.Split(role.AllowedExtensions, ",") - for extensionKey := range extensions { - if !strutil.StrListContains(allowedExtensions, extensionKey) { - notAllowed = append(notAllowed, extensionKey) - } - } - - if len(notAllowed) != 0 { - return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed) - } - return extensions, nil - } - - if role.DefaultExtensionsTemplate { - for extensionKey, 
extensionValue := range role.DefaultExtensions { - // Look for templating markers {{ .* }} - matched := containsTemplateRegex.MatchString(extensionValue) - if matched { - if req.EntityID != "" { - // Retrieve extension value based on template + entityID from request. - templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System()) - if err == nil { - // Template returned an extension value that we can use - extensions[extensionKey] = templateExtensionValue - } else { - return nil, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) - } - } - } else { - // Static extension value or err template - extensions[extensionKey] = extensionValue - } - } - } else { - extensions = role.DefaultExtensions - } - - return extensions, nil -} - -func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) { - var ttl, maxTTL time.Duration - var err error - - ttlRaw, specifiedTTL := data.GetOk("ttl") - if specifiedTTL { - ttl = time.Duration(ttlRaw.(int)) * time.Second - } else { - ttl, err = parseutil.ParseDurationSecond(role.TTL) - if err != nil { - return 0, err - } - } - if ttl == 0 { - ttl = b.System().DefaultLeaseTTL() - } - - maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) - if err != nil { - return 0, err - } - if maxTTL == 0 { - maxTTL = b.System().MaxLeaseTTL() - } - - if ttl > maxTTL { - // Don't error if they were using system defaults, only error if - // they specifically chose a bad TTL - if !specifiedTTL { - ttl = maxTTL - } else { - return 0, fmt.Errorf("ttl is larger than maximum allowed %d", maxTTL/time.Second) - } - } - - return ttl, nil -} - -func (b *backend) validateSignedKeyRequirements(publickey ssh.PublicKey, role *sshRole) error { - if len(role.AllowedUserKeyTypesLengths) != 0 { - var keyType string - var keyBits int - - switch k := publickey.(type) { - case ssh.CryptoPublicKey: - ff := k.CryptoPublicKey() - switch k := ff.(type) { - case 
*rsa.PublicKey: - keyType = "rsa" - keyBits = k.N.BitLen() - case *dsa.PublicKey: - keyType = "dsa" - keyBits = k.Parameters.P.BitLen() - case *ecdsa.PublicKey: - keyType = "ecdsa" - keyBits = k.Curve.Params().BitSize - case ed25519.PublicKey: - keyType = "ed25519" - default: - return fmt.Errorf("public key type of %s is not allowed", keyType) - } - default: - return fmt.Errorf("pubkey not suitable for crypto (expected ssh.CryptoPublicKey but found %T)", k) - } - - keyTypeToMapKey := createKeyTypeToMapKey(keyType, keyBits) - - var present bool - var pass bool - for _, kstr := range keyTypeToMapKey[keyType] { - allowed_values, ok := role.AllowedUserKeyTypesLengths[kstr] - if !ok { - continue - } - - present = true - - for _, value := range allowed_values { - if keyType == "rsa" || keyType == "dsa" { - // Regardless of map naming, we always need to validate the - // bit length of RSA and DSA keys. Use the keyType flag to - if keyBits == value { - pass = true - } - } else if kstr == "ec" || kstr == "ecdsa" { - // If the map string is "ecdsa", we have to validate the keyBits - // are a match for an allowed value, meaning that our curve - // is allowed. This isn't necessary when a named curve (e.g. - // ssh.KeyAlgoECDSA256) is allowed (and hence kstr is that), - // because keyBits is already specified in the kstr. Thus, - // we have conditioned around kstr and not keyType (like with - // rsa or dsa). - if keyBits == value { - pass = true - } - } else { - // We get here in two cases: we have a algo-named EC key - // matching a format specifier in the key map (e.g., a P-256 - // key with a KeyAlgoECDSA256 entry in the map) or we have a - // ed25519 key (which is always allowed). 
- pass = true - } - } - } - - if !present { - return fmt.Errorf("key of type %s is not allowed", keyType) - } - - if !pass { - return fmt.Errorf("key is of an invalid size: %v", keyBits) - } - } - return nil -} - -func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) { - defer func() { - if r := recover(); r != nil { - errMsg, ok := r.(string) - if ok { - retCert = nil - retErr = errors.New(errMsg) - } - } - }() - - serialNumber, err := certutil.GenerateSerialNumber() - if err != nil { - return nil, err - } - - now := time.Now() - - sshAlgorithmSigner, ok := b.Signer.(ssh.AlgorithmSigner) - if !ok { - return nil, fmt.Errorf("failed to generate signed SSH key: signer is not an AlgorithmSigner") - } - - // prepare certificate for signing - nonce := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce") - } - certificate := &ssh.Certificate{ - Serial: serialNumber.Uint64(), - Key: b.PublicKey, - KeyId: b.KeyID, - ValidPrincipals: b.ValidPrincipals, - ValidAfter: uint64(now.Add(-b.Role.NotBeforeDuration).In(time.UTC).Unix()), - ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()), - CertType: b.CertificateType, - Permissions: ssh.Permissions{ - CriticalOptions: b.CriticalOptions, - Extensions: b.Extensions, - }, - Nonce: nonce, - SignatureKey: sshAlgorithmSigner.PublicKey(), - } - - // get bytes to sign; this is based on Certificate.bytesForSigning() from the go ssh lib - out := certificate.Marshal() - // Drop trailing signature length. - certificateBytes := out[:len(out)-4] - - algo := b.Role.AlgorithmSigner - - // Handle the new default algorithm selection process correctly. 
- if algo == DefaultAlgorithmSigner && sshAlgorithmSigner.PublicKey().Type() == ssh.KeyAlgoRSA { - algo = ssh.SigAlgoRSASHA2256 - } else if algo == DefaultAlgorithmSigner { - algo = "" - } - - sig, err := sshAlgorithmSigner.SignWithAlgorithm(rand.Reader, certificateBytes, algo) - if err != nil { - return nil, fmt.Errorf("failed to generate signed SSH key: sign error: %w", err) - } - - certificate.Signature = sig - - return certificate, nil -} - -func createKeyTypeToMapKey(keyType string, keyBits int) map[string][]string { - keyTypeToMapKey := map[string][]string{ - "rsa": {"rsa", ssh.KeyAlgoRSA}, - "dsa": {"dsa", ssh.KeyAlgoDSA}, - "ecdsa": {"ecdsa", "ec"}, - "ed25519": {"ed25519", ssh.KeyAlgoED25519}, - } - - if keyType == "ecdsa" { - if algo, ok := ecCurveBitsToAlgoName[keyBits]; ok { - keyTypeToMapKey[keyType] = append(keyTypeToMapKey[keyType], algo) - } - } - - return keyTypeToMapKey -} diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 595839301b86c..3264b000c2b4c 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -40,7 +40,6 @@ type sshRole struct { KeyBits int `mapstructure:"key_bits" json:"key_bits"` AdminUser string `mapstructure:"admin_user" json:"admin_user"` DefaultUser string `mapstructure:"default_user" json:"default_user"` - DefaultUserTemplate bool `mapstructure:"default_user_template" json:"default_user_template"` CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` Port int `mapstructure:"port" json:"port"` @@ -48,7 +47,6 @@ type sshRole struct { AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` - AllowedDomainsTemplate bool `mapstructure:"allowed_domains_template" 
json:"allowed_domains_template"` KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"` MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` TTL string `mapstructure:"ttl" json:"ttl"` @@ -124,15 +122,6 @@ func pathRoles(b *backend) *framework.Path { Name: "Default Username", }, }, - "default_user_template": { - Type: framework.TypeBool, - Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] - If set, Default user can be specified using identity template policies. - Non-templated users are also permitted. - `, - Default: false, - }, "cidr_list": { Type: framework.TypeString, Description: ` @@ -224,15 +213,6 @@ func pathRoles(b *backend) *framework.Path { valid host. If only certain domains are allowed, then this list enforces it. `, }, - "allowed_domains_template": { - Type: framework.TypeBool, - Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] - If set, Allowed domains can be specified using identity template policies. - Non-templated domains are also permitted. 
- `, - Default: false, - }, "key_option_specs": { Type: framework.TypeString, Description: ` @@ -577,9 +557,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *f AllowedUsers: allowedUsers, AllowedUsersTemplate: data.Get("allowed_users_template").(bool), AllowedDomains: data.Get("allowed_domains").(string), - AllowedDomainsTemplate: data.Get("allowed_domains_template").(bool), DefaultUser: defaultUser, - DefaultUserTemplate: data.Get("default_user_template").(bool), AllowBareDomains: data.Get("allow_bare_domains").(bool), AllowSubdomains: data.Get("allow_subdomains").(bool), AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool), @@ -761,9 +739,7 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { "allowed_users": role.AllowedUsers, "allowed_users_template": role.AllowedUsersTemplate, "allowed_domains": role.AllowedDomains, - "allowed_domains_template": role.AllowedDomainsTemplate, "default_user": role.DefaultUser, - "default_user_template": role.DefaultUserTemplate, "ttl": int64(ttl.Seconds()), "max_ttl": int64(maxTTL.Seconds()), "allowed_critical_options": role.AllowedCriticalOptions, diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 19196013e6d50..8e21ad1e7e7f7 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -2,12 +2,42 @@ package ssh import ( "context" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "errors" "fmt" + "io" + "regexp" + "strconv" + "strings" + "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" ) +type creationBundle struct { + KeyID string + ValidPrincipals []string + PublicKey ssh.PublicKey + CertificateType uint32 + TTL 
time.Duration + Signer ssh.Signer + Role *sshRole + CriticalOptions map[string]string + Extensions map[string]string +} + +var containsTemplateRegex = regexp.MustCompile(`{{.+?}}`) + func pathSign(b *backend) *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameWithAtRegex("role"), @@ -92,5 +122,497 @@ func (b *backend) pathSignCertificate(ctx context.Context, req *logical.Request, return logical.ErrorResponse(fmt.Sprintf("public_key failed to meet the key requirements: %s", err)), nil } - return b.pathSignIssueCertificateHelper(ctx, req, data, role, userPublicKey) + // Note that these various functions always return "user errors" so we pass + // them as 4xx values + keyID, err := b.calculateKeyID(data, req, role, userPublicKey) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + certificateType, err := b.calculateCertificateType(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + var parsedPrincipals []string + if certificateType == ssh.HostCert { + parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, "", role.AllowedDomains, validateValidPrincipalForHosts(role)) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } else { + parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, role.DefaultUser, role.AllowedUsers, strutil.StrListContains) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + ttl, err := b.calculateTTL(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + criticalOptions, err := b.calculateCriticalOptions(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + extensions, err := b.calculateExtensions(data, req, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + privateKeyEntry, err := caKey(ctx, req.Storage, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("failed to read CA private key: 
%w", err) + } + if privateKeyEntry == nil || privateKeyEntry.Key == "" { + return nil, fmt.Errorf("failed to read CA private key") + } + + signer, err := ssh.ParsePrivateKey([]byte(privateKeyEntry.Key)) + if err != nil { + return nil, fmt.Errorf("failed to parse stored CA private key: %w", err) + } + + cBundle := creationBundle{ + KeyID: keyID, + PublicKey: userPublicKey, + Signer: signer, + ValidPrincipals: parsedPrincipals, + TTL: ttl, + CertificateType: certificateType, + Role: role, + CriticalOptions: criticalOptions, + Extensions: extensions, + } + + certificate, err := cBundle.sign() + if err != nil { + return nil, err + } + + signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate) + if len(signedSSHCertificate) == 0 { + return nil, fmt.Errorf("error marshaling signed certificate") + } + + response := &logical.Response{ + Data: map[string]interface{}{ + "serial_number": strconv.FormatUint(certificate.Serial, 16), + "signed_key": string(signedSSHCertificate), + }, + } + + return response, nil +} + +func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logical.Request, role *sshRole, defaultPrincipal, principalsAllowedByRole string, validatePrincipal func([]string, string) bool) ([]string, error) { + validPrincipals := "" + validPrincipalsRaw, ok := data.GetOk("valid_principals") + if ok { + validPrincipals = validPrincipalsRaw.(string) + } else { + validPrincipals = defaultPrincipal + } + + parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false) + // Build list of allowed Principals from template and static principalsAllowedByRole + var allowedPrincipals []string + for _, principal := range strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) { + if role.AllowedUsersTemplate { + // Look for templating markers {{ .* }} + matched := containsTemplateRegex.MatchString(principal) + if matched { + if req.EntityID != "" { + // Retrieve principal based on template 
+ entityID from request. + templatePrincipal, err := framework.PopulateIdentityTemplate(principal, req.EntityID, b.System()) + if err == nil { + // Template returned a principal + allowedPrincipals = append(allowedPrincipals, templatePrincipal) + } else { + return nil, fmt.Errorf("template '%s' could not be rendered -> %s", principal, err) + } + } + } else { + // Static principal or err template + allowedPrincipals = append(allowedPrincipals, principal) + } + } else { + // Static principal + allowedPrincipals = append(allowedPrincipals, principal) + } + } + + switch { + case len(parsedPrincipals) == 0: + // There is nothing to process + return nil, nil + case len(allowedPrincipals) == 0: + // User has requested principals to be set, but role is not configured + // with any principals + return nil, fmt.Errorf("role is not configured to allow any principals") + default: + // Role was explicitly configured to allow any principal. + if principalsAllowedByRole == "*" { + return parsedPrincipals, nil + } + + for _, principal := range parsedPrincipals { + if !validatePrincipal(strutil.RemoveDuplicates(allowedPrincipals, false), principal) { + return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal) + } + } + return parsedPrincipals, nil + } +} + +func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool { + return func(allowedPrincipals []string, validPrincipal string) bool { + for _, allowedPrincipal := range allowedPrincipals { + if allowedPrincipal == validPrincipal && role.AllowBareDomains { + return true + } + if role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowedPrincipal) { + return true + } + } + + return false + } +} + +func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) { + requestedCertificateType := data.Get("cert_type").(string) + + var certificateType uint32 + switch requestedCertificateType { + case "user": + if !role.AllowUserCertificates { + 
return 0, errors.New("cert_type 'user' is not allowed by role") + } + certificateType = ssh.UserCert + case "host": + if !role.AllowHostCertificates { + return 0, errors.New("cert_type 'host' is not allowed by role") + } + certificateType = ssh.HostCert + default: + return 0, errors.New("cert_type must be either 'user' or 'host'") + } + + return certificateType, nil +} + +func (b *backend) calculateKeyID(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) { + reqID := data.Get("key_id").(string) + + if reqID != "" { + if !role.AllowUserKeyIDs { + return "", fmt.Errorf("setting key_id is not allowed by role") + } + return reqID, nil + } + + keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}" + if req.DisplayName == "" { + keyIDFormat = "vault-{{public_key_hash}}" + } + + if role.KeyIDFormat != "" { + keyIDFormat = role.KeyIDFormat + } + + keyID := substQuery(keyIDFormat, map[string]string{ + "token_display_name": req.DisplayName, + "role_name": data.Get("role").(string), + "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())), + }) + + return keyID, nil +} + +func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) { + unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{}) + if len(unparsedCriticalOptions) == 0 { + return role.DefaultCriticalOptions, nil + } + + criticalOptions := convertMapToStringValue(unparsedCriticalOptions) + + if role.AllowedCriticalOptions != "" { + notAllowedOptions := []string{} + allowedCriticalOptions := strings.Split(role.AllowedCriticalOptions, ",") + + for option := range criticalOptions { + if !strutil.StrListContains(allowedCriticalOptions, option) { + notAllowedOptions = append(notAllowedOptions, option) + } + } + + if len(notAllowedOptions) != 0 { + return nil, fmt.Errorf("critical options not on allowed list: %v", notAllowedOptions) + } + } + + return criticalOptions, nil 
+} + +func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, error) { + unparsedExtensions := data.Get("extensions").(map[string]interface{}) + extensions := make(map[string]string) + + if len(unparsedExtensions) > 0 { + extensions := convertMapToStringValue(unparsedExtensions) + if role.AllowedExtensions == "*" { + // Allowed extensions was configured to allow all + return extensions, nil + } + + notAllowed := []string{} + allowedExtensions := strings.Split(role.AllowedExtensions, ",") + for extensionKey := range extensions { + if !strutil.StrListContains(allowedExtensions, extensionKey) { + notAllowed = append(notAllowed, extensionKey) + } + } + + if len(notAllowed) != 0 { + return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed) + } + return extensions, nil + } + + if role.DefaultExtensionsTemplate { + for extensionKey, extensionValue := range role.DefaultExtensions { + // Look for templating markers {{ .* }} + matched := containsTemplateRegex.MatchString(extensionValue) + if matched { + if req.EntityID != "" { + // Retrieve extension value based on template + entityID from request. 
+ templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System()) + if err == nil { + // Template returned an extension value that we can use + extensions[extensionKey] = templateExtensionValue + } else { + return nil, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) + } + } + } else { + // Static extension value or err template + extensions[extensionKey] = extensionValue + } + } + } else { + extensions = role.DefaultExtensions + } + + return extensions, nil +} + +func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) { + var ttl, maxTTL time.Duration + var err error + + ttlRaw, specifiedTTL := data.GetOk("ttl") + if specifiedTTL { + ttl = time.Duration(ttlRaw.(int)) * time.Second + } else { + ttl, err = parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return 0, err + } + } + if ttl == 0 { + ttl = b.System().DefaultLeaseTTL() + } + + maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return 0, err + } + if maxTTL == 0 { + maxTTL = b.System().MaxLeaseTTL() + } + + if ttl > maxTTL { + // Don't error if they were using system defaults, only error if + // they specifically chose a bad TTL + if !specifiedTTL { + ttl = maxTTL + } else { + return 0, fmt.Errorf("ttl is larger than maximum allowed %d", maxTTL/time.Second) + } + } + + return ttl, nil +} + +func (b *backend) validateSignedKeyRequirements(publickey ssh.PublicKey, role *sshRole) error { + if len(role.AllowedUserKeyTypesLengths) != 0 { + var keyType string + var keyBits int + + switch k := publickey.(type) { + case ssh.CryptoPublicKey: + ff := k.CryptoPublicKey() + switch k := ff.(type) { + case *rsa.PublicKey: + keyType = "rsa" + keyBits = k.N.BitLen() + case *dsa.PublicKey: + keyType = "dsa" + keyBits = k.Parameters.P.BitLen() + case *ecdsa.PublicKey: + keyType = "ecdsa" + keyBits = k.Curve.Params().BitSize + case ed25519.PublicKey: + keyType = "ed25519" + 
default: + return fmt.Errorf("public key type of %s is not allowed", keyType) + } + default: + return fmt.Errorf("pubkey not suitable for crypto (expected ssh.CryptoPublicKey but found %T)", k) + } + + keyTypeToMapKey := map[string][]string{ + "rsa": {"rsa", ssh.KeyAlgoRSA}, + "dsa": {"dsa", ssh.KeyAlgoDSA}, + "ecdsa": {"ecdsa", "ec"}, + "ed25519": {"ed25519", ssh.KeyAlgoED25519}, + } + + if keyType == "ecdsa" { + ecCurveBitsToAlgoName := map[int]string{ + 256: ssh.KeyAlgoECDSA256, + 384: ssh.KeyAlgoECDSA384, + 521: ssh.KeyAlgoECDSA521, + } + + if algo, ok := ecCurveBitsToAlgoName[keyBits]; ok { + keyTypeToMapKey[keyType] = append(keyTypeToMapKey[keyType], algo) + } + + // If the algorithm is not found, it could be that we have a curve + // that we haven't added a constant for yet. But they could allow it + // (assuming x/crypto/ssh can parse it) via setting a ec: + // mapping rather than using a named SSH key type, so erring out here + // isn't advisable. + } + + var present bool + var pass bool + for _, kstr := range keyTypeToMapKey[keyType] { + allowed_values, ok := role.AllowedUserKeyTypesLengths[kstr] + if !ok { + continue + } + + present = true + + for _, value := range allowed_values { + if keyType == "rsa" || keyType == "dsa" { + // Regardless of map naming, we always need to validate the + // bit length of RSA and DSA keys. Use the keyType flag to + if keyBits == value { + pass = true + } + } else if kstr == "ec" || kstr == "ecdsa" { + // If the map string is "ecdsa", we have to validate the keyBits + // are a match for an allowed value, meaning that our curve + // is allowed. This isn't necessary when a named curve (e.g. + // ssh.KeyAlgoECDSA256) is allowed (and hence kstr is that), + // because keyBits is already specified in the kstr. Thus, + // we have conditioned around kstr and not keyType (like with + // rsa or dsa). 
+ if keyBits == value { + pass = true + } + } else { + // We get here in two cases: we have a algo-named EC key + // matching a format specifier in the key map (e.g., a P-256 + // key with a KeyAlgoECDSA256 entry in the map) or we have a + // ed25519 key (which is always allowed). + pass = true + } + } + } + + if !present { + return fmt.Errorf("key of type %s is not allowed", keyType) + } + + if !pass { + return fmt.Errorf("key is of an invalid size: %v", keyBits) + } + } + return nil +} + +func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) { + defer func() { + if r := recover(); r != nil { + errMsg, ok := r.(string) + if ok { + retCert = nil + retErr = errors.New(errMsg) + } + } + }() + + serialNumber, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, err + } + + now := time.Now() + + sshAlgorithmSigner, ok := b.Signer.(ssh.AlgorithmSigner) + if !ok { + return nil, fmt.Errorf("failed to generate signed SSH key: signer is not an AlgorithmSigner") + } + + // prepare certificate for signing + nonce := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce") + } + certificate := &ssh.Certificate{ + Serial: serialNumber.Uint64(), + Key: b.PublicKey, + KeyId: b.KeyID, + ValidPrincipals: b.ValidPrincipals, + ValidAfter: uint64(now.Add(-b.Role.NotBeforeDuration).In(time.UTC).Unix()), + ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()), + CertType: b.CertificateType, + Permissions: ssh.Permissions{ + CriticalOptions: b.CriticalOptions, + Extensions: b.Extensions, + }, + Nonce: nonce, + SignatureKey: sshAlgorithmSigner.PublicKey(), + } + + // get bytes to sign; this is based on Certificate.bytesForSigning() from the go ssh lib + out := certificate.Marshal() + // Drop trailing signature length. 
+ certificateBytes := out[:len(out)-4] + + algo := b.Role.AlgorithmSigner + + // Handle the new default algorithm selection process correctly. + if algo == DefaultAlgorithmSigner && sshAlgorithmSigner.PublicKey().Type() == ssh.KeyAlgoRSA { + algo = ssh.SigAlgoRSASHA2256 + } else if algo == DefaultAlgorithmSigner { + algo = "" + } + + sig, err := sshAlgorithmSigner.SignWithAlgorithm(rand.Reader, certificateBytes, algo) + if err != nil { + return nil, fmt.Errorf("failed to generate signed SSH key: sign error: %w", err) + } + + certificate.Signature = sig + + return certificate, nil } diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go index 1923caa346b3d..5658232763a2a 100644 --- a/builtin/logical/ssh/util.go +++ b/builtin/logical/ssh/util.go @@ -238,7 +238,7 @@ func convertMapToIntSlice(initial map[string]interface{}) (map[string][]int, err // Serve a template processor for custom format inputs func substQuery(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index c4d92a3ddbf57..abe14b51c745a 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -623,8 +623,7 @@ func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, de } func testAccStepEncrypt( - t *testing.T, name, plaintext string, decryptData map[string]interface{}, -) logicaltest.TestStep { + t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, @@ -648,8 +647,7 @@ func testAccStepEncrypt( } func testAccStepEncryptUpsert( - t *testing.T, name, plaintext string, decryptData map[string]interface{}, -) logicaltest.TestStep { + t *testing.T, name, 
plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.CreateOperation, Path: "encrypt/" + name, @@ -673,8 +671,7 @@ func testAccStepEncryptUpsert( } func testAccStepEncryptContext( - t *testing.T, name, plaintext, context string, decryptData map[string]interface{}, -) logicaltest.TestStep { + t *testing.T, name, plaintext, context string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, @@ -700,8 +697,7 @@ func testAccStepEncryptContext( } func testAccStepDecrypt( - t *testing.T, name, plaintext string, decryptData map[string]interface{}, -) logicaltest.TestStep { + t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, @@ -729,8 +725,7 @@ func testAccStepDecrypt( } func testAccStepRewrap( - t *testing.T, name string, decryptData map[string]interface{}, expectedVer int, -) logicaltest.TestStep { + t *testing.T, name string, decryptData map[string]interface{}, expectedVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "rewrap/" + name, @@ -749,7 +744,7 @@ func testAccStepRewrap( verString := splitStrings[1][1:] ver, err := strconv.Atoi(verString) if err != nil { - return fmt.Errorf("error pulling out version from verString %q, ciphertext was %s", verString, d.Ciphertext) + return fmt.Errorf("error pulling out version from verString '%s', ciphertext was %s", verString, d.Ciphertext) } if ver != expectedVer { return fmt.Errorf("did not get expected version") @@ -762,8 +757,7 @@ func testAccStepRewrap( func testAccStepEncryptVX( t *testing.T, name, plaintext string, decryptData map[string]interface{}, - ver int, encryptHistory map[int]map[string]interface{}, -) logicaltest.TestStep { + ver int, encryptHistory 
map[int]map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, @@ -794,8 +788,7 @@ func testAccStepEncryptVX( func testAccStepLoadVX( t *testing.T, name string, decryptData map[string]interface{}, - ver int, encryptHistory map[int]map[string]interface{}, -) logicaltest.TestStep { + ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep { // This is really a no-op to allow us to do data manip in the check function return logicaltest.TestStep{ Operation: logical.ReadOperation, @@ -808,8 +801,7 @@ func testAccStepLoadVX( } func testAccStepDecryptExpectFailure( - t *testing.T, name, plaintext string, decryptData map[string]interface{}, -) logicaltest.TestStep { + t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, @@ -833,8 +825,7 @@ func testAccStepRotate(t *testing.T, name string) logicaltest.TestStep { func testAccStepWriteDatakey(t *testing.T, name string, noPlaintext bool, bits int, - dataKeyInfo map[string]interface{}, -) logicaltest.TestStep { + dataKeyInfo map[string]interface{}) logicaltest.TestStep { data := map[string]interface{}{} subPath := "plaintext" if noPlaintext { @@ -865,7 +856,7 @@ func testAccStepWriteDatakey(t *testing.T, name string, dataKeyInfo["plaintext"] = d.Plaintext plainBytes, err := base64.StdEncoding.DecodeString(d.Plaintext) if err != nil { - return fmt.Errorf("could not base64 decode plaintext string %q", d.Plaintext) + return fmt.Errorf("could not base64 decode plaintext string '%s'", d.Plaintext) } if len(plainBytes)*8 != bits { return fmt.Errorf("returned key does not have correct bit length") @@ -878,8 +869,7 @@ func testAccStepWriteDatakey(t *testing.T, name string, } func testAccStepDecryptDatakey(t *testing.T, name string, - dataKeyInfo map[string]interface{}, -) logicaltest.TestStep { + 
dataKeyInfo map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, @@ -893,7 +883,7 @@ func testAccStepDecryptDatakey(t *testing.T, name string, } if d.Plaintext != dataKeyInfo["plaintext"].(string) { - return fmt.Errorf("plaintext mismatch: got %q, expected %q, decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data) + return fmt.Errorf("plaintext mismatch: got '%s', expected '%s', decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data) } return nil }, diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go index 820079873ffca..7a5d53e54e581 100644 --- a/builtin/logical/transit/path_decrypt.go +++ b/builtin/logical/transit/path_decrypt.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "net/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -50,14 +51,6 @@ Base64 encoded nonce value used during encryption. Must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+.`, }, - "partial_failure_response_code": { - Type: framework.TypeInt, - Description: ` -Ordinarily, if a batch item fails to decrypt due to a bad input, but other batch items succeed, -the HTTP response code is 400 (Bad Request). Some applications may want to treat partial failures differently. -Providing the parameter returns the given response code integer instead of a 400 in this case. 
If all values fail -HTTP 400 is still returned.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -149,7 +142,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d p.Lock(false) } - successesInBatch := false for i, item := range batchInputItems { if batchResponseItems[i].Error != "" { continue @@ -166,7 +158,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d batchResponseItems[i].Error = err.Error() continue } - successesInBatch = true batchResponseItems[i].Plaintext = plaintext } @@ -192,7 +183,18 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) + // Depending on the errors in the batch, different status codes should be returned. User errors + // will return a 400 and precede internal errors which return a 500. The reasoning behind this is + // that user errors are non-retryable without making changes to the request, and should be surfaced + // to the user first. 
+ switch { + case userErrorInBatch: + return logical.RespondWithStatusCode(resp, req, http.StatusBadRequest) + case internalErrorInBatch: + return logical.RespondWithStatusCode(resp, req, http.StatusInternalServerError) + } + + return resp, nil } const pathDecryptHelpSyn = `Decrypt a ciphertext value using a named key` diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go index 9a6b21aa12dde..c52cd6cf0ae1e 100644 --- a/builtin/logical/transit/path_decrypt_test.go +++ b/builtin/logical/transit/path_decrypt_test.go @@ -119,7 +119,6 @@ func TestTransit_BatchDecryption_DerivedKey(t *testing.T) { want []DecryptBatchResponseItem shouldErr bool wantHTTPStatus int - params map[string]interface{} }{ { name: "nil-input", @@ -183,19 +182,6 @@ func TestTransit_BatchDecryption_DerivedKey(t *testing.T) { }, wantHTTPStatus: http.StatusBadRequest, }, - { - name: "batch-partial-success-overridden-response", - in: []interface{}{ - map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, - map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[1].context}, - }, - want: []DecryptBatchResponseItem{ - {Error: "cipher: message authentication failed"}, - {Plaintext: plaintextItems[1].plaintext}, - }, - params: map[string]interface{}{"partial_failure_response_code": http.StatusAccepted}, - wantHTTPStatus: http.StatusAccepted, - }, { name: "batch-full-failure", in: []interface{}{ @@ -208,20 +194,6 @@ func TestTransit_BatchDecryption_DerivedKey(t *testing.T) { }, wantHTTPStatus: http.StatusBadRequest, }, - { - name: "batch-full-failure-overridden-response", - in: []interface{}{ - map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, - map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[0].context}, - }, - want: []DecryptBatchResponseItem{ - {Error: "cipher: message 
authentication failed"}, - {Error: "cipher: message authentication failed"}, - }, - params: map[string]interface{}{"partial_failure_response_code": http.StatusAccepted}, - // Full failure, shouldn't affect status code - wantHTTPStatus: http.StatusBadRequest, - }, } for _, tt := range tests { @@ -234,9 +206,6 @@ func TestTransit_BatchDecryption_DerivedKey(t *testing.T) { "batch_input": tt.in, }, } - for k, v := range tt.params { - req.Data[k] = v - } resp, err = b.HandleRequest(context.Background(), req) didErr := err != nil || (resp != nil && resp.IsError()) diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index 3e0c720377b64..aa5bb503bf9d9 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -113,14 +113,6 @@ will severely impact the ciphertext's security.`, Must be 0 (for latest) or a value greater than or equal to the min_encryption_version configured on the key.`, }, - "partial_failure_response_code": { - Type: framework.TypeInt, - Description: ` -Ordinarily, if a batch item fails to encrypt due to a bad input, but other batch items succeed, -the HTTP response code is 400 (Bad Request). Some applications may want to treat partial failures differently. -Providing the parameter returns the given response code integer instead of a 400 in this case. If all values fail -HTTP 400 is still returned.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -383,7 +375,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d // item fails, respectively mark the error in the response // collection and continue to process other items. 
warnAboutNonceUsage := false - successesInBatch := false for i, item := range batchInputItems { if batchResponseItems[i].Error != "" { continue @@ -411,7 +402,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d continue } - successesInBatch = true keyVersion := item.KeyVersion if keyVersion == 0 { keyVersion = p.LatestVersion @@ -453,27 +443,13 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) -} - -// Depending on the errors in the batch, different status codes should be returned. User errors -// will return a 400 and precede internal errors which return a 500. The reasoning behind this is -// that user errors are non-retryable without making changes to the request, and should be surfaced -// to the user first. -func batchRequestResponse(d *framework.FieldData, resp *logical.Response, req *logical.Request, successesInBatch, userErrorInBatch, internalErrorInBatch bool) (*logical.Response, error) { + // Depending on the errors in the batch, different status codes should be returned. User errors + // will return a 400 and precede internal errors which return a 500. The reasoning behind this is + // that user errors are non-retryable without making changes to the request, and should be surfaced + // to the user first. 
switch { case userErrorInBatch: - code := http.StatusBadRequest - if successesInBatch { - if codeRaw, ok := d.GetOk("partial_failure_response_code"); ok { - code = codeRaw.(int) - if code < 1 || code > 599 { - resp.AddWarning("invalid HTTP response code override from partial_failure_response_code, reverting to HTTP 400") - code = http.StatusBadRequest - } - } - } - return logical.RespondWithStatusCode(resp, req, code) + return logical.RespondWithStatusCode(resp, req, http.StatusBadRequest) case internalErrorInBatch: return logical.RespondWithStatusCode(resp, req, http.StatusInternalServerError) } diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go index 16dbb1a4903b1..108d6218bfd79 100644 --- a/builtin/logical/transit/path_hmac_test.go +++ b/builtin/logical/transit/path_hmac_test.go @@ -14,204 +14,188 @@ import ( func TestTransit_HMAC(t *testing.T) { b, storage := createBackendWithSysView(t) - cases := []struct { - name string - typ string - }{ - { - name: "foo", - typ: "", - }, - { - name: "dedicated", - typ: "hmac", - }, - } - - for _, c := range cases { - req := &logical.Request{ - Storage: storage, - Operation: logical.UpdateOperation, - Path: "keys/" + c.name, - } - _, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatal(err) - } + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } - // Now, change the key value to something we control - p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ - Storage: storage, - Name: c.name, - }, b.GetRandomReader()) - if err != nil { - t.Fatal(err) - } - // We don't care as we're the only one using this - latestVersion := strconv.Itoa(p.LatestVersion) - keyEntry := p.Keys[latestVersion] - keyEntry.HMACKey = []byte("01234567890123456789012345678901") - keyEntry.Key = 
[]byte("01234567890123456789012345678901") - p.Keys[latestVersion] = keyEntry - if err = p.Persist(context.Background(), storage); err != nil { - t.Fatal(err) - } + // Now, change the key value to something we control + p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ + Storage: storage, + Name: "foo", + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + latestVersion := strconv.Itoa(p.LatestVersion) + keyEntry := p.Keys[latestVersion] + keyEntry.HMACKey = []byte("01234567890123456789012345678901") + p.Keys[latestVersion] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } - req.Path = "hmac/" + c.name - req.Data = map[string]interface{}{ - "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", - } + req.Path = "hmac/foo" + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + } - doRequest := func(req *logical.Request, errExpected bool, expected string) { - path := req.Path - defer func() { req.Path = path }() + doRequest := func(req *logical.Request, errExpected bool, expected string) { + path := req.Path + defer func() { req.Path = path }() - resp, err := b.HandleRequest(context.Background(), req) - if err != nil && !errExpected { - panic(fmt.Sprintf("%v", err)) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if errExpected { - if !resp.IsError() { - t.Fatalf("bad: got error response: %#v", *resp) - } - return - } - if resp.IsError() { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + panic(fmt.Sprintf("%v", err)) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { t.Fatalf("bad: got error response: %#v", *resp) } - value, ok := resp.Data["hmac"] - if !ok { - t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data) - } - if value.(string) != expected { - panic(fmt.Sprintf("mismatched 
hashes; expected %s, got resp data %#v", expected, resp.Data)) - } + return + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["hmac"] + if !ok { + t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data) + } + if value.(string) != expected { + panic(fmt.Sprintf("mismatched hashes; expected %s, got resp data %#v", expected, resp.Data)) + } - // Now verify - req.Path = strings.ReplaceAll(req.Path, "hmac", "verify") - req.Data["hmac"] = value.(string) - resp, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("%v: %v", err, resp) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if resp.Data["valid"].(bool) == false { - panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) - } + // Now verify + req.Path = strings.Replace(req.Path, "hmac", "verify", -1) + req.Data["hmac"] = value.(string) + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) + } + } - // Comparisons are against values generated via openssl + // Comparisons are against values generated via openssl - // Test defaults -- sha2-256 - doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=") + // Test defaults -- sha2-256 + doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=") - // Test algorithm selection in the path - req.Path = "hmac/" + c.name + "/sha2-224" - doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + // Test algorithm selection in the path + req.Path = "hmac/foo/sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") - // Reset and test algorithm selection in the data - req.Path = "hmac/" + c.name - 
req.Data["algorithm"] = "sha2-224" - doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + // Reset and test algorithm selection in the data + req.Path = "hmac/foo" + req.Data["algorithm"] = "sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") - req.Data["algorithm"] = "sha2-384" - doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p") + req.Data["algorithm"] = "sha2-384" + doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p") - req.Data["algorithm"] = "sha2-512" - doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + req.Data["algorithm"] = "sha2-512" + doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") - // Test returning as base64 - req.Data["format"] = "base64" - doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + // Test returning as base64 + req.Data["format"] = "base64" + doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") - // Test SHA3 - req.Path = "hmac/" + c.name - req.Data["algorithm"] = "sha3-224" - doRequest(req, false, "vault:v1:TGipmKH8LR/BkMolYpDYy0BJCIhTtGPDhV2VkQ==") + // Test SHA3 + req.Path = "hmac/foo" + req.Data["algorithm"] = "sha3-224" + doRequest(req, false, "vault:v1:TGipmKH8LR/BkMolYpDYy0BJCIhTtGPDhV2VkQ==") - req.Data["algorithm"] = "sha3-256" - doRequest(req, false, "vault:v1:+px9V/7QYLfdK808zPESC2T/L33uFf4Blzsn9Jy838o=") + req.Data["algorithm"] = "sha3-256" + doRequest(req, false, "vault:v1:+px9V/7QYLfdK808zPESC2T/L33uFf4Blzsn9Jy838o=") - req.Data["algorithm"] = "sha3-384" - doRequest(req, false, "vault:v1:YGoRwN4UdTRYZeOER86jsQOB8piWenzLDzJ2wJQK/Jq59rAsY8lh7SCdqqCyFg70") + req.Data["algorithm"] = "sha3-384" 
+ doRequest(req, false, "vault:v1:YGoRwN4UdTRYZeOER86jsQOB8piWenzLDzJ2wJQK/Jq59rAsY8lh7SCdqqCyFg70") - req.Data["algorithm"] = "sha3-512" - doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") + req.Data["algorithm"] = "sha3-512" + doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") - // Test returning SHA3 as base64 - req.Data["format"] = "base64" - doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") + // Test returning SHA3 as base64 + req.Data["format"] = "base64" + doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") - req.Data["algorithm"] = "foobar" - doRequest(req, true, "") + req.Data["algorithm"] = "foobar" + doRequest(req, true, "") - req.Data["algorithm"] = "sha2-256" - req.Data["input"] = "foobar" - doRequest(req, true, "") - req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + doRequest(req, true, "") + req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" - // Rotate - err = p.Rotate(context.Background(), storage, b.GetRandomReader()) - if err != nil { - t.Fatal(err) - } - keyEntry = p.Keys["2"] - // Set to another value we control - keyEntry.HMACKey = []byte("12345678901234567890123456789012") - p.Keys["2"] = keyEntry - if err = p.Persist(context.Background(), storage); err != nil { - t.Fatal(err) - } + // Rotate + err = p.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + keyEntry = p.Keys["2"] + // Set to another value we control + keyEntry.HMACKey = []byte("12345678901234567890123456789012") + p.Keys["2"] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } - doRequest(req, false, 
"vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=") + doRequest(req, false, "vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=") - // Verify a previous version - req.Path = "verify/" + c.name + // Verify a previous version + req.Path = "verify/foo" - req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("%v: %v", err, resp) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if resp.Data["valid"].(bool) == false { - t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) - } + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } - // Try a bad value - req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" - resp, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("%v: %v", err, resp) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if resp.Data["valid"].(bool) { - t.Fatalf("expected error validating hmac") - } + // Try a bad value + req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) { + t.Fatalf("expected error validating hmac") + } - // Set min decryption version, attempt to verify - p.MinDecryptionVersion = 2 - if err = p.Persist(context.Background(), storage); err != nil { - t.Fatal(err) - } + // Set min decryption version, attempt to verify + p.MinDecryptionVersion = 2 + if err = p.Persist(context.Background(), storage); err 
!= nil { + t.Fatal(err) + } - req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" - resp, err = b.HandleRequest(context.Background(), req) - if err == nil { - t.Fatalf("expected an error, got response %#v", resp) - } - if err != logical.ErrInvalidRequest { - t.Fatalf("expected invalid request error, got %v", err) - } + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("expected an error, got response %#v", resp) + } + if err != logical.ErrInvalidRequest { + t.Fatalf("expected invalid request error, got %v", err) } } @@ -284,7 +268,7 @@ func TestTransit_batchHMAC(t *testing.T) { t.Fatalf("Expected HMAC %s got %s in result %d", expected[i].HMAC, m.HMAC, i) } if expected[i].Error != "" && expected[i].Error != m.Error { - t.Fatalf("Expected Error %q got %q in result %d", expected[i].Error, m.Error, i) + t.Fatalf("Expected Error '%s' got '%s' in result %d", expected[i].Error, m.Error, i) } } diff --git a/builtin/logical/transit/path_import.go b/builtin/logical/transit/path_import.go index 817cf5fc5ddf2..c9e9837510920 100644 --- a/builtin/logical/transit/path_import.go +++ b/builtin/logical/transit/path_import.go @@ -177,8 +177,6 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * polReq.KeyType = keysutil.KeyType_RSA3072 case "rsa-4096": polReq.KeyType = keysutil.KeyType_RSA4096 - case "hmac": - polReq.KeyType = keysutil.KeyType_HMAC default: return logical.ErrorResponse(fmt.Sprintf("unknown key type: %v", keyType)), logical.ErrInvalidRequest } @@ -258,9 +256,6 @@ func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Reque return nil, err } importKey, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) - if err != nil { - return nil, err - } err = p.Import(ctx, req.Storage, importKey, b.GetRandomReader()) if err != nil { return nil, err diff --git 
a/builtin/logical/transit/path_import_test.go b/builtin/logical/transit/path_import_test.go index d31b12b454e4c..6e2748833db46 100644 --- a/builtin/logical/transit/path_import_test.go +++ b/builtin/logical/transit/path_import_test.go @@ -30,7 +30,6 @@ var keyTypes = []string{ "rsa-2048", "rsa-3072", "rsa-4096", - "hmac", } var hashFns = []string{ @@ -46,8 +45,6 @@ var ( keys = map[string]interface{}{} ) -const nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" - func generateKeys(t *testing.T) { t.Helper() @@ -81,39 +78,6 @@ func getKey(t *testing.T, keyType string) interface{} { return key } -func TestTransit_ImportNSSEd25519Key(t *testing.T) { - generateKeys(t) - b, s := createBackendWithStorage(t) - - wrappingKey, err := b.getWrappingKey(context.Background(), s) - if err != nil || wrappingKey == nil { - t.Fatalf("failed to retrieve public wrapping key: %s", err) - } - privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey - pubWrappingKey := &privWrappingKey.PublicKey - - rawPKCS8, err := base64.StdEncoding.DecodeString(nssFormattedEd25519Key) - if err != nil { - t.Fatalf("failed to parse nss base64: %v", err) - } - - blob := wrapTargetPKCS8ForImport(t, pubWrappingKey, rawPKCS8, "SHA256") - req := &logical.Request{ - Storage: s, - Operation: logical.UpdateOperation, - Path: "keys/nss-ed25519/import", - Data: map[string]interface{}{ - "ciphertext": blob, - "type": "ed25519", - }, - } - - _, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("failed to import NSS-formatted Ed25519 key: %v", err) - } -} - func TestTransit_Import(t *testing.T) { generateKeys(t) b, s := createBackendWithStorage(t) @@ -539,29 +503,6 @@ func TestTransit_ImportVersion(t *testing.T) { func wrapTargetKeyForImport(t *testing.T, wrappingKey *rsa.PublicKey, targetKey interface{}, targetKeyType string, hashFnName string) 
string { t.Helper() - // Format target key for wrapping - var preppedTargetKey []byte - var ok bool - var err error - switch targetKeyType { - case "aes128-gcm96", "aes256-gcm96", "chacha20-poly1305", "hmac": - preppedTargetKey, ok = targetKey.([]byte) - if !ok { - t.Fatal("failed to wrap target key for import: symmetric key not provided in byte format") - } - default: - preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey) - if err != nil { - t.Fatalf("failed to wrap target key for import: %s", err) - } - } - - return wrapTargetPKCS8ForImport(t, wrappingKey, preppedTargetKey, hashFnName) -} - -func wrapTargetPKCS8ForImport(t *testing.T, wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hashFnName string) string { - t.Helper() - // Generate an ephemeral AES-256 key ephKey, err := uuid.GenerateRandomBytes(32) if err != nil { @@ -586,6 +527,22 @@ func wrapTargetPKCS8ForImport(t *testing.T, wrappingKey *rsa.PublicKey, preppedT t.Fatalf("failed to wrap target key for import: %s", err) } + // Format target key for wrapping + var preppedTargetKey []byte + var ok bool + switch targetKeyType { + case "aes128-gcm96", "aes256-gcm96", "chacha20-poly1305": + preppedTargetKey, ok = targetKey.([]byte) + if !ok { + t.Fatal("failed to wrap target key for import: symmetric key not provided in byte format") + } + default: + preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + } + // Wrap target key with KWP targetKeyWrapped, err := kwp.Wrap(preppedTargetKey) if err != nil { @@ -601,7 +558,7 @@ func generateKey(keyType string) (interface{}, error) { switch keyType { case "aes128-gcm96": return uuid.GenerateRandomBytes(16) - case "aes256-gcm96", "hmac": + case "aes256-gcm96": return uuid.GenerateRandomBytes(32) case "chacha20-poly1305": return uuid.GenerateRandomBytes(32) diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index 
e8edabc1769c5..e64adb24a81c4 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -103,11 +103,6 @@ being automatically rotated. A value of 0 (default) disables automatic rotation for the key.`, }, - "key_size": { - Type: framework.TypeInt, - Default: 0, - Description: fmt.Sprintf("The key size in bytes for the algorithm. Only applies to HMAC and must be no fewer than %d bytes and no more than %d", keysutil.HmacMinKeySize, keysutil.HmacMaxKeySize), - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -135,7 +130,6 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * derived := d.Get("derived").(bool) convergent := d.Get("convergent_encryption").(bool) keyType := d.Get("type").(string) - keySize := d.Get("key_size").(int) exportable := d.Get("exportable").(bool) allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) @@ -158,7 +152,6 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * AllowPlaintextBackup: allowPlaintextBackup, AutoRotatePeriod: autoRotatePeriod, } - switch keyType { case "aes128-gcm96": polReq.KeyType = keysutil.KeyType_AES128_GCM96 @@ -180,20 +173,9 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeyType = keysutil.KeyType_RSA3072 case "rsa-4096": polReq.KeyType = keysutil.KeyType_RSA4096 - case "hmac": - polReq.KeyType = keysutil.KeyType_HMAC default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } - if keySize != 0 { - if polReq.KeyType != keysutil.KeyType_HMAC { - return logical.ErrorResponse(fmt.Sprintf("key_size is not valid for algorithm %v", polReq.KeyType)), logical.ErrInvalidRequest - } - if keySize < keysutil.HmacMinKeySize || keySize > keysutil.HmacMaxKeySize { - return logical.ErrorResponse(fmt.Sprintf("invalid key_size %d", 
keySize)), logical.ErrInvalidRequest - } - polReq.KeySize = keySize - } p, upserted, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { @@ -260,9 +242,6 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f "imported_key": p.Imported, }, } - if p.KeySize != 0 { - resp.Data["key_size"] = p.KeySize - } if p.Imported { resp.Data["imported_key_allow_rotation"] = p.AllowImportedKeyRotation diff --git a/builtin/logical/transit/path_restore_test.go b/builtin/logical/transit/path_restore_test.go index 6e13b985ee65d..031679f9942f9 100644 --- a/builtin/logical/transit/path_restore_test.go +++ b/builtin/logical/transit/path_restore_test.go @@ -83,7 +83,7 @@ func TestTransit_Restore(t *testing.T) { return &b } - keyExitsError := fmt.Errorf("key %q already exists", keyName) + keyExitsError := fmt.Errorf("key \"%s\" already exists", keyName) testCases := []struct { Name string diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index 1f0a9f3cb622e..ade69530df78b 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -2,11 +2,8 @@ package transit import ( "context" - "crypto/rsa" "encoding/base64" "fmt" - "strconv" - "strings" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -134,13 +131,6 @@ Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, Default: "asn1", Description: `The method by which to marshal the signature. The default is 'asn1' which is used by openssl and X.509. It can also be set to 'jws' which is used for JWT signatures; setting it to this will also cause the encoding of the signature to be url-safe base64 instead of using standard base64 encoding. Currently only valid for ECDSA P-256 key types".`, }, - - "salt_length": { - Type: framework.TypeString, - Default: "auto", - Description: `The salt length used to sign. 
Currently only applies to the RSA PSS signature scheme. -Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -227,13 +217,6 @@ Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, Default: "asn1", Description: `The method by which to unmarshal the signature when verifying. The default is 'asn1' which is used by openssl and X.509; can also be set to 'jws' which is used for JWT signatures in which case the signature is also expected to be url-safe base64 encoding instead of standard base64 encoding. Currently only valid for ECDSA P-256 key types".`, }, - - "salt_length": { - Type: framework.TypeString, - Default: "auto", - Description: `The salt length used to sign. Currently only applies to the RSA PSS signature scheme. -Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -245,33 +228,6 @@ Options are 'auto' (the default used by Golang, causing the salt to be as large } } -func (b *backend) getSaltLength(d *framework.FieldData) (int, error) { - rawSaltLength, ok := d.GetOk("salt_length") - // This should only happen when something is wrong with the schema, - // so this is a reasonable default. 
- if !ok { - return rsa.PSSSaltLengthAuto, nil - } - - rawSaltLengthStr := rawSaltLength.(string) - lowerSaltLengthStr := strings.ToLower(rawSaltLengthStr) - switch lowerSaltLengthStr { - case "auto": - return rsa.PSSSaltLengthAuto, nil - case "hash": - return rsa.PSSSaltLengthEqualsHash, nil - default: - saltLengthInt, err := strconv.Atoi(lowerSaltLengthStr) - if err != nil { - return rsa.PSSSaltLengthEqualsHash - 1, fmt.Errorf("salt length neither 'auto', 'hash', nor an int: %s", rawSaltLength) - } - if saltLengthInt < rsa.PSSSaltLengthEqualsHash { - return rsa.PSSSaltLengthEqualsHash - 1, fmt.Errorf("salt length is invalid: %d", saltLengthInt) - } - return saltLengthInt, nil - } -} - func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) ver := d.Get("key_version").(int) @@ -296,10 +252,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr prehashed := d.Get("prehashed").(bool) sigAlgorithm := d.Get("signature_algorithm").(string) - saltLength, err := b.getSaltLength(d) - if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ @@ -378,12 +330,7 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } - sig, err := p.SignWithOptions(ver, context, input, &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, - }) + sig, err := p.Sign(ver, context, input, hashAlgorithm, sigAlgorithm, marshaling) if err != nil { if batchInputRaw != nil { response[i].Error = err.Error() @@ -523,10 +470,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * prehashed := d.Get("prehashed").(bool) sigAlgorithm := d.Get("signature_algorithm").(string) - saltLength, err := b.getSaltLength(d) - if err != 
nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ @@ -590,12 +533,7 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * } } - valid, err := p.VerifySignatureWithOptions(context, input, sig, &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, - }) + valid, err := p.VerifySignature(context, input, hashAlgorithm, sigAlgorithm, marshaling, sig) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go index fa411b0a2c8c2..072f8a265fa15 100644 --- a/builtin/logical/transit/path_sign_verify_test.go +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -8,8 +8,6 @@ import ( "strings" "testing" - "github.com/hashicorp/vault/helper/constants" - "golang.org/x/crypto/ed25519" "github.com/hashicorp/vault/sdk/helper/keysutil" @@ -702,266 +700,3 @@ func TestTransit_SignVerify_ED25519(t *testing.T) { outcome[1].valid = false verifyRequest(req, false, outcome, "bar", goodsig, true) } - -func TestTransit_SignVerify_RSA_PSS(t *testing.T) { - t.Run("2048", func(t *testing.T) { - testTransit_SignVerify_RSA_PSS(t, 2048) - }) - t.Run("3072", func(t *testing.T) { - testTransit_SignVerify_RSA_PSS(t, 3072) - }) - t.Run("4096", func(t *testing.T) { - testTransit_SignVerify_RSA_PSS(t, 4096) - }) -} - -func testTransit_SignVerify_RSA_PSS(t *testing.T, bits int) { - b, storage := createBackendWithSysView(t) - - // First create a key - req := &logical.Request{ - Storage: storage, - Operation: logical.UpdateOperation, - Path: "keys/foo", - Data: map[string]interface{}{ - "type": fmt.Sprintf("rsa-%d", bits), - }, - } - _, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatal(err) - } - - signRequest := func(errExpected bool, postpath 
string) string { - t.Helper() - req.Path = "sign/foo" + postpath - resp, err := b.HandleRequest(context.Background(), req) - if err != nil && !errExpected { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if errExpected { - if !resp.IsError() { - t.Fatalf("bad: should have gotten error response: %#v", *resp) - } - return "" - } - if resp.IsError() { - t.Fatalf("bad: got error response: %#v", *resp) - } - // Since we are reusing the same request, let's clear the salt length each time. - delete(req.Data, "salt_length") - - value, ok := resp.Data["signature"] - if !ok { - t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) - } - return value.(string) - } - - verifyRequest := func(errExpected bool, postpath, sig string) { - t.Helper() - req.Path = "verify/foo" + postpath - req.Data["signature"] = sig - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - if errExpected { - return - } - t.Fatalf("got error: %v, sig was %v", err, sig) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if resp.IsError() { - if errExpected { - return - } - t.Fatalf("bad: got error response: %#v", *resp) - } - value, ok := resp.Data["valid"] - if !ok { - t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) - } - if !value.(bool) && !errExpected { - t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) - } else if value.(bool) && errExpected { - t.Fatalf("expected error and didn't get one; req was %#v, resp is %#v", *req, *resp) - } - // Since we are reusing the same request, let's clear the signature each time. 
- delete(req.Data, "signature") - } - - newReqData := func(hashAlgorithm string, marshalingName string) map[string]interface{} { - return map[string]interface{}{ - "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", - "signature_algorithm": "pss", - "hash_algorithm": hashAlgorithm, - "marshaling_algorithm": marshalingName, - } - } - - signAndVerifyRequest := func(hashAlgorithm string, marshalingName string, signSaltLength string, signErrExpected bool, verifySaltLength string, verifyErrExpected bool) { - t.Log("\t\t\t", signSaltLength, "/", verifySaltLength) - req.Data = newReqData(hashAlgorithm, marshalingName) - - req.Data["salt_length"] = signSaltLength - t.Log("\t\t\t\t", "sign req data:", req.Data) - sig := signRequest(signErrExpected, "") - - req.Data["salt_length"] = verifySaltLength - t.Log("\t\t\t\t", "verify req data:", req.Data) - verifyRequest(verifyErrExpected, "", sig) - } - - invalidSaltLengths := []string{"bar", "-2"} - t.Log("invalidSaltLengths:", invalidSaltLengths) - - autoSaltLengths := []string{"auto", "0"} - t.Log("autoSaltLengths:", autoSaltLengths) - - hashSaltLengths := []string{"hash", "-1"} - t.Log("hashSaltLengths:", hashSaltLengths) - - positiveSaltLengths := []string{"1"} - t.Log("positiveSaltLengths:", positiveSaltLengths) - - nonAutoSaltLengths := append(hashSaltLengths, positiveSaltLengths...) - t.Log("nonAutoSaltLengths:", nonAutoSaltLengths) - - validSaltLengths := append(autoSaltLengths, nonAutoSaltLengths...) 
- t.Log("validSaltLengths:", validSaltLengths) - - testCombinatorics := func(t *testing.T, hashAlgorithm string, marshalingName string) { - t.Log("\t\t", "valid", "/", "invalid salt lengths") - for _, validSaltLength := range validSaltLengths { - for _, invalidSaltLength := range invalidSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, validSaltLength, false, invalidSaltLength, true) - } - } - - t.Log("\t\t", "invalid", "/", "invalid salt lengths") - for _, invalidSaltLength1 := range invalidSaltLengths { - for _, invalidSaltLength2 := range invalidSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, invalidSaltLength1, true, invalidSaltLength2, true) - } - } - - t.Log("\t\t", "invalid", "/", "valid salt lengths") - for _, invalidSaltLength := range invalidSaltLengths { - for _, validSaltLength := range validSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, invalidSaltLength, true, validSaltLength, true) - } - } - - t.Log("\t\t", "valid", "/", "valid salt lengths") - for _, validSaltLength := range validSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, validSaltLength, false, validSaltLength, false) - } - - t.Log("\t\t", "hash", "/", "hash salt lengths") - for _, hashSaltLength1 := range hashSaltLengths { - for _, hashSaltLength2 := range hashSaltLengths { - if hashSaltLength1 != hashSaltLength2 { - signAndVerifyRequest(hashAlgorithm, marshalingName, hashSaltLength1, false, hashSaltLength2, false) - } - } - } - - t.Log("\t\t", "hash", "/", "positive salt lengths") - for _, hashSaltLength := range hashSaltLengths { - for _, positiveSaltLength := range positiveSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, hashSaltLength, false, positiveSaltLength, true) - } - } - - t.Log("\t\t", "positive", "/", "hash salt lengths") - for _, positiveSaltLength := range positiveSaltLengths { - for _, hashSaltLength := range hashSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, 
positiveSaltLength, false, hashSaltLength, true) - } - } - - t.Log("\t\t", "auto", "/", "auto salt lengths") - for _, autoSaltLength1 := range autoSaltLengths { - for _, autoSaltLength2 := range autoSaltLengths { - if autoSaltLength1 != autoSaltLength2 { - signAndVerifyRequest(hashAlgorithm, marshalingName, autoSaltLength1, false, autoSaltLength2, false) - } - } - } - - t.Log("\t\t", "auto", "/", "non-auto salt lengths") - for _, autoSaltLength := range autoSaltLengths { - for _, nonAutoSaltLength := range nonAutoSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, autoSaltLength, false, nonAutoSaltLength, true) - } - } - - t.Log("\t\t", "non-auto", "/", "auto salt lengths") - for _, nonAutoSaltLength := range nonAutoSaltLengths { - for _, autoSaltLength := range autoSaltLengths { - signAndVerifyRequest(hashAlgorithm, marshalingName, nonAutoSaltLength, false, autoSaltLength, false) - } - } - } - - testAutoSignAndVerify := func(t *testing.T, hashAlgorithm string, marshalingName string) { - t.Log("\t\t", "Make a signature with an implicit, automatic salt length") - req.Data = newReqData(hashAlgorithm, marshalingName) - t.Log("\t\t\t", "sign req data:", req.Data) - sig := signRequest(false, "") - - t.Log("\t\t", "Verify it with an implicit, automatic salt length") - t.Log("\t\t\t", "verify req data:", req.Data) - verifyRequest(false, "", sig) - - t.Log("\t\t", "Verify it with an explicit, automatic salt length") - for _, autoSaltLength := range autoSaltLengths { - t.Log("\t\t\t", "auto", "/", autoSaltLength) - req.Data["salt_length"] = autoSaltLength - t.Log("\t\t\t\t", "verify req data:", req.Data) - verifyRequest(false, "", sig) - } - - t.Log("\t\t", "Try to verify it with an explicit, incorrect salt length") - for _, nonAutoSaltLength := range nonAutoSaltLengths { - t.Log("\t\t\t", "auto", "/", nonAutoSaltLength) - req.Data["salt_length"] = nonAutoSaltLength - t.Log("\t\t\t\t", "verify req data:", req.Data) - verifyRequest(true, "", sig) - } - - 
t.Log("\t\t", "Make a signature with an explicit, valid salt length & and verify it with an implicit, automatic salt length") - for _, validSaltLength := range validSaltLengths { - t.Log("\t\t\t", validSaltLength, "/", "auto") - - req.Data = newReqData(hashAlgorithm, marshalingName) - req.Data["salt_length"] = validSaltLength - t.Log("\t\t\t", "sign req data:", req.Data) - sig := signRequest(false, "") - - t.Log("\t\t\t", "verify req data:", req.Data) - verifyRequest(false, "", sig) - } - } - - for hashAlgorithm := range keysutil.HashTypeMap { - t.Log("Hash algorithm:", hashAlgorithm) - - for marshalingName := range keysutil.MarshalingTypeMap { - t.Log("\t", "Marshaling type:", marshalingName) - testName := fmt.Sprintf("%s-%s", hashAlgorithm, marshalingName) - t.Run(testName, func(t *testing.T) { - if constants.IsFIPS() && strings.HasPrefix(hashAlgorithm, "sha3-") { - t.Skip("\t", "Skipping hashing algo on fips:", hashAlgorithm) - } - - testCombinatorics(t, hashAlgorithm, marshalingName) - testAutoSignAndVerify(t, hashAlgorithm, marshalingName) - }) - } - } -} diff --git a/builtin/logical/transit/stepwise_test.go b/builtin/logical/transit/stepwise_test.go index b64aca9861e99..89e6cbe8f61d9 100644 --- a/builtin/logical/transit/stepwise_test.go +++ b/builtin/logical/transit/stepwise_test.go @@ -162,8 +162,7 @@ func testAccStepwiseReadPolicyWithVersions(t *testing.T, name string, expectNone } func testAccStepwiseEncryptContext( - t *testing.T, name, plaintext, context string, decryptData map[string]interface{}, -) stepwise.Step { + t *testing.T, name, plaintext, context string, decryptData map[string]interface{}) stepwise.Step { return stepwise.Step{ Operation: stepwise.UpdateOperation, Path: "encrypt/" + name, @@ -189,8 +188,7 @@ func testAccStepwiseEncryptContext( } func testAccStepwiseDecrypt( - t *testing.T, name, plaintext string, decryptData map[string]interface{}, -) stepwise.Step { + t *testing.T, name, plaintext string, decryptData map[string]interface{}) 
stepwise.Step { return stepwise.Step{ Operation: stepwise.UpdateOperation, Path: "decrypt/" + name, diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index 92b6b4327ce5a..d33fe9c1a8eb8 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -7,11 +7,7 @@ import ( "reflect" "sync" - log "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-uuid" - v5 "github.com/hashicorp/vault/builtin/plugin/v5" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -25,36 +21,24 @@ var ( // Factory returns a configured plugin logical.Backend. func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - merr := &multierror.Error{} _, ok := conf.Config["plugin_name"] if !ok { return nil, fmt.Errorf("plugin_name not provided") } - b, err := v5.Backend(ctx, conf) - if err == nil { - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil - } - merr = multierror.Append(merr, err) - - b, err = Backend(ctx, conf) + b, err := Backend(ctx, conf) if err != nil { - merr = multierror.Append(merr, err) - return nil, fmt.Errorf("invalid backend version: %s", merr) + return nil, err } if err := b.Setup(ctx, conf); err != nil { - merr = multierror.Append(merr, err) - return nil, merr.ErrorOrNil() + return nil, err } return b, nil } // Backend returns an instance of the backend, either as a plugin if external // or as a concrete implementation if builtin, casted as logical.Backend. 
-func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, error) { +func Backend(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { var b PluginBackend name := conf.Config["plugin_name"] @@ -62,12 +46,11 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, if err != nil { return nil, err } - version := conf.Config["plugin_version"] sys := conf.System - // NewBackendWithVersion with isMetadataMode set to true - raw, err := bplugin.NewBackendWithVersion(ctx, name, pluginType, sys, conf, true, version) + // NewBackend with isMetadataMode set to true + raw, err := bplugin.NewBackend(ctx, name, pluginType, sys, conf, true) if err != nil { return nil, err } @@ -79,28 +62,15 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, // Get SpecialPaths and BackendType paths := raw.SpecialPaths() btype := raw.Type() - runningVersion := "" - if versioner, ok := raw.(logical.PluginVersioner); ok { - runningVersion = versioner.PluginVersion().Version - } - - external := false - if externaler, ok := raw.(logical.Externaler); ok { - external = externaler.IsExternal() - } // Cleanup meta plugin backend raw.Cleanup(ctx) - // Initialize b.Backend with placeholder backend since plugin + // Initialize b.Backend with dummy backend since plugin // backends will need to be lazy loaded. - b.Backend = &placeholderBackend{ - Backend: framework.Backend{ - PathsSpecial: paths, - BackendType: btype, - RunningVersion: runningVersion, - }, - external: external, + b.Backend = &framework.Backend{ + PathsSpecial: paths, + BackendType: btype, } b.config = conf @@ -108,26 +78,9 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, return &b, nil } -// placeholderBackend is used a placeholder before a backend is lazy-loaded. -// It is mostly used to mark that the backend is an external backend. 
-type placeholderBackend struct { - framework.Backend - - external bool -} - -func (p *placeholderBackend) IsExternal() bool { - return p.external -} - -var ( - _ logical.Externaler = (*placeholderBackend)(nil) - _ logical.PluginVersioner = (*placeholderBackend)(nil) -) - // PluginBackend is a thin wrapper around plugin.BackendPluginClient type PluginBackend struct { - Backend logical.Backend + logical.Backend sync.RWMutex config *logical.BackendConfig @@ -150,7 +103,7 @@ func (b *PluginBackend) startBackend(ctx context.Context, storage logical.Storag // Ensure proper cleanup of the backend (i.e. call client.Kill()) b.Backend.Cleanup(ctx) - nb, err := bplugin.NewBackendWithVersion(ctx, pluginName, pluginType, b.config.System, b.config, false, b.config.Config["plugin_version"]) + nb, err := bplugin.NewBackend(ctx, pluginName, pluginType, b.config.System, b.config, false) if err != nil { return err } @@ -165,12 +118,12 @@ func (b *PluginBackend) startBackend(ctx context.Context, storage logical.Storag if !b.loaded { if b.Backend.Type() != nb.Type() { nb.Cleanup(ctx) - b.Backend.Logger().Warn("failed to start plugin process", "plugin", pluginName, "error", ErrMismatchType) + b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType) return ErrMismatchType } if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) { nb.Cleanup(ctx) - b.Backend.Logger().Warn("failed to start plugin process", "plugin", pluginName, "error", ErrMismatchPaths) + b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths) return ErrMismatchPaths } } @@ -216,7 +169,7 @@ func (b *PluginBackend) lazyLoadBackend(ctx context.Context, storage logical.Sto // Reload plugin if it's an rpc.ErrShutdown b.Lock() if b.canary == canary { - b.Backend.Logger().Debug("reloading plugin backend", "plugin", b.config.Config["plugin_name"]) + b.Logger().Debug("reloading plugin backend", 
"plugin", b.config.Config["plugin_name"]) err := b.startBackend(ctx, storage) if err != nil { b.Unlock() @@ -267,71 +220,3 @@ func (b *PluginBackend) HandleExistenceCheck(ctx context.Context, req *logical.R func (b *PluginBackend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { return nil } - -// SpecialPaths is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) SpecialPaths() *logical.Paths { - b.RLock() - defer b.RUnlock() - return b.Backend.SpecialPaths() -} - -// System is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) System() logical.SystemView { - b.RLock() - defer b.RUnlock() - return b.Backend.System() -} - -// Logger is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) Logger() log.Logger { - b.RLock() - defer b.RUnlock() - return b.Backend.Logger() -} - -// Cleanup is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) Cleanup(ctx context.Context) { - b.RLock() - defer b.RUnlock() - b.Backend.Cleanup(ctx) -} - -// InvalidateKey is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) InvalidateKey(ctx context.Context, key string) { - b.RLock() - defer b.RUnlock() - b.Backend.InvalidateKey(ctx, key) -} - -// Setup is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) Setup(ctx context.Context, config *logical.BackendConfig) error { - b.RLock() - defer b.RUnlock() - return b.Backend.Setup(ctx, config) -} - -// Type is a thin wrapper used to ensure we grab the lock for race purposes -func (b *PluginBackend) Type() logical.BackendType { - b.RLock() - defer b.RUnlock() - return b.Backend.Type() -} - -func (b *PluginBackend) PluginVersion() logical.PluginVersion { - if versioner, ok := b.Backend.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion 
-} - -func (b *PluginBackend) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() - } - return false -} - -var ( - _ logical.PluginVersioner = (*PluginBackend)(nil) - _ logical.Externaler = (*PluginBackend)(nil) -) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go index 4d2727037adca..53c6f9611829d 100644 --- a/builtin/plugin/backend_lazyLoad_test.go +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -2,7 +2,6 @@ package plugin import ( "context" - "errors" "testing" "github.com/hashicorp/vault/sdk/helper/logging" @@ -184,17 +183,3 @@ func (v testSystemView) LookupPlugin(context.Context, string, consts.PluginType) }, }, nil } - -func (v testSystemView) LookupPluginVersion(context.Context, string, consts.PluginType, string) (*pluginutil.PluginRunner, error) { - return &pluginutil.PluginRunner{ - Name: "test-plugin-runner", - Builtin: true, - BuiltinFactory: func() (interface{}, error) { - return v.factory, nil - }, - }, nil -} - -func (v testSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { - return nil, errors.New("ListVersionedPlugins not implemented for testSystemView") -} diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go index 27533a58272fc..9354463bf349a 100644 --- a/builtin/plugin/backend_test.go +++ b/builtin/plugin/backend_test.go @@ -24,66 +24,26 @@ func TestBackend_impl(t *testing.T) { } func TestBackend(t *testing.T) { - pluginCmds := []string{"TestBackend_PluginMain", "TestBackend_PluginMain_Multiplexed"} - - for _, pluginCmd := range pluginCmds { - t.Run(pluginCmd, func(t *testing.T) { - config, cleanup := testConfig(t, pluginCmd) - defer cleanup() - - _, err := plugin.Backend(context.Background(), config) - if err != nil { - t.Fatal(err) - } - }) - } -} + config, cleanup := testConfig(t) + defer cleanup() -func TestBackend_Factory(t *testing.T) { - pluginCmds := 
[]string{"TestBackend_PluginMain", "TestBackend_PluginMain_Multiplexed"} - - for _, pluginCmd := range pluginCmds { - t.Run(pluginCmd, func(t *testing.T) { - config, cleanup := testConfig(t, pluginCmd) - defer cleanup() - - _, err := plugin.Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - }) + _, err := plugin.Backend(context.Background(), config) + if err != nil { + t.Fatal(err) } } -func TestBackend_PluginMain(t *testing.T) { - args := []string{} - if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadataModeEnv) != "true" { - return - } - - caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) - if caPEM == "" { - t.Fatal("CA cert not passed in") - } - - args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM)) - - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(args) - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) +func TestBackend_Factory(t *testing.T) { + config, cleanup := testConfig(t) + defer cleanup() - err := logicalPlugin.Serve(&logicalPlugin.ServeOpts{ - BackendFactoryFunc: mock.Factory, - TLSProviderFunc: tlsProviderFunc, - }) + _, err := plugin.Factory(context.Background(), config) if err != nil { t.Fatal(err) } } -func TestBackend_PluginMain_Multiplexed(t *testing.T) { +func TestBackend_PluginMain(t *testing.T) { args := []string{} if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadataModeEnv) != "true" { return @@ -102,7 +62,7 @@ func TestBackend_PluginMain_Multiplexed(t *testing.T) { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - err := logicalPlugin.ServeMultiplex(&logicalPlugin.ServeOpts{ + err := logicalPlugin.Serve(&logicalPlugin.ServeOpts{ BackendFactoryFunc: mock.Factory, TLSProviderFunc: tlsProviderFunc, }) @@ -111,7 +71,7 @@ func TestBackend_PluginMain_Multiplexed(t *testing.T) { } } -func 
testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) { +func testConfig(t *testing.T) (*logical.BackendConfig, func()) { cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) @@ -133,7 +93,7 @@ func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, []string{}, "") + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "TestBackend_PluginMain", []string{}, "") return config, func() { cluster.Cleanup() diff --git a/builtin/plugin/v5/backend.go b/builtin/plugin/v5/backend.go deleted file mode 100644 index 3f7a9a884ce77..0000000000000 --- a/builtin/plugin/v5/backend.go +++ /dev/null @@ -1,167 +0,0 @@ -package plugin - -import ( - "context" - "net/rpc" - "sync" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/plugin" - bplugin "github.com/hashicorp/vault/sdk/plugin" -) - -// Backend returns an instance of the backend, either as a plugin if external -// or as a concrete implementation if builtin, casted as logical.Backend. 
-func Backend(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - var b backend - name := conf.Config["plugin_name"] - pluginType, err := consts.ParsePluginType(conf.Config["plugin_type"]) - if err != nil { - return nil, err - } - pluginVersion := conf.Config["plugin_version"] - - sys := conf.System - - raw, err := plugin.NewBackendV5(ctx, name, pluginType, pluginVersion, sys, conf) - if err != nil { - return nil, err - } - b.Backend = raw - b.config = conf - - return &b, nil -} - -// backend is a thin wrapper around a builtin plugin or a plugin.BackendPluginClientV5 -type backend struct { - logical.Backend - mu sync.RWMutex - - config *logical.BackendConfig - - // Used to detect if we already reloaded - canary string -} - -func (b *backend) reloadBackend(ctx context.Context, storage logical.Storage) error { - pluginName := b.config.Config["plugin_name"] - pluginType, err := consts.ParsePluginType(b.config.Config["plugin_type"]) - if err != nil { - return err - } - pluginVersion := b.config.Config["plugin_version"] - - b.Logger().Debug("plugin: reloading plugin backend", "plugin", pluginName) - - // Ensure proper cleanup of the backend - // Pass a context value so that the plugin client will call the appropriate - // cleanup method for reloading - reloadCtx := context.WithValue(ctx, plugin.ContextKeyPluginReload, "reload") - b.Backend.Cleanup(reloadCtx) - - nb, err := plugin.NewBackendV5(ctx, pluginName, pluginType, pluginVersion, b.config.System, b.config) - if err != nil { - return err - } - err = nb.Setup(ctx, b.config) - if err != nil { - return err - } - b.Backend = nb - - // Re-initialize the backend in case plugin was reloaded - // after it crashed - err = b.Backend.Initialize(ctx, &logical.InitializationRequest{ - Storage: storage, - }) - - if err != nil { - return err - } - - return nil -} - -// HandleRequest is a thin wrapper implementation of HandleRequest that includes automatic plugin reload. 
-func (b *backend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { - b.mu.RLock() - canary := b.canary - resp, err := b.Backend.HandleRequest(ctx, req) - b.mu.RUnlock() - // Need to compare string value for case were err comes from plugin RPC - // and is returned as plugin.BasicError type. - if err != nil && - (err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) { - // Reload plugin if it's an rpc.ErrShutdown - b.mu.Lock() - if b.canary == canary { - err := b.reloadBackend(ctx, req.Storage) - if err != nil { - b.mu.Unlock() - return nil, err - } - b.canary, err = uuid.GenerateUUID() - if err != nil { - b.mu.Unlock() - return nil, err - } - } - b.mu.Unlock() - - // Try request once more - b.mu.RLock() - defer b.mu.RUnlock() - return b.Backend.HandleRequest(ctx, req) - } - return resp, err -} - -// HandleExistenceCheck is a thin wrapper implementation of HandleRequest that includes automatic plugin reload. -func (b *backend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) { - b.mu.RLock() - canary := b.canary - checkFound, exists, err := b.Backend.HandleExistenceCheck(ctx, req) - b.mu.RUnlock() - if err != nil && - (err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) { - // Reload plugin if it's an rpc.ErrShutdown - b.mu.Lock() - if b.canary == canary { - err := b.reloadBackend(ctx, req.Storage) - if err != nil { - b.mu.Unlock() - return false, false, err - } - b.canary, err = uuid.GenerateUUID() - if err != nil { - b.mu.Unlock() - return false, false, err - } - } - b.mu.Unlock() - - // Try request once more - b.mu.RLock() - defer b.mu.RUnlock() - return b.Backend.HandleExistenceCheck(ctx, req) - } - return checkFound, exists, err -} - -// InvalidateKey is a thin wrapper used to ensure we grab the lock for race purposes -func (b *backend) InvalidateKey(ctx context.Context, key string) { - b.mu.RLock() - defer b.mu.RUnlock() - 
b.Backend.InvalidateKey(ctx, key) -} - -func (b *backend) IsExternal() bool { - switch b.Backend.(type) { - case *plugin.BackendPluginClientV5: - return true - } - return false -} diff --git a/changelog/10467.txt b/changelog/10467.txt deleted file mode 100644 index 411bbf78eaeed..0000000000000 --- a/changelog/10467.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. -``` diff --git a/changelog/11904.txt b/changelog/11904.txt deleted file mode 100644 index 584aeae8d3b33..0000000000000 --- a/changelog/11904.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -api: properly handle switching to/from unix domain socket when changing client address -``` diff --git a/changelog/11969.txt b/changelog/11969.txt deleted file mode 100644 index 668093565d33c..0000000000000 --- a/changelog/11969.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. -``` \ No newline at end of file diff --git a/changelog/14399.txt b/changelog/14399.txt deleted file mode 100644 index 5d5c6b179c9f3..0000000000000 --- a/changelog/14399.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -debug: Fix panic when capturing debug bundle on Windows -``` \ No newline at end of file diff --git a/changelog/14474.txt b/changelog/14474.txt deleted file mode 100644 index 1469c03334a5f..0000000000000 --- a/changelog/14474.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses. -When either the ttl and num_uses fields are not specified, the role's configuration is used. 
-``` \ No newline at end of file diff --git a/changelog/14751.txt b/changelog/14751.txt deleted file mode 100644 index 17cbfa02bdcbc..0000000000000 --- a/changelog/14751.txt +++ /dev/null @@ -1,4 +0,0 @@ - -```release-note:improvement -auth/cert: Add metadata to identity-alias -``` diff --git a/changelog/14946.txt b/changelog/14946.txt deleted file mode 100644 index 43ee1e55d09ac..0000000000000 --- a/changelog/14946.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process -``` diff --git a/changelog/15552.txt b/changelog/15552.txt deleted file mode 100644 index 22d854bc54b45..0000000000000 --- a/changelog/15552.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:bug -openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions -``` -```release-note:bug -api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ -``` \ No newline at end of file diff --git a/changelog/15561.txt b/changelog/15561.txt deleted file mode 100644 index 95787b654e184..0000000000000 --- a/changelog/15561.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs -``` diff --git a/changelog/15583.txt b/changelog/15583.txt deleted file mode 100644 index b6cda31682745..0000000000000 --- a/changelog/15583.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. -``` diff --git a/changelog/15638.txt b/changelog/15638.txt deleted file mode 100644 index 94179552f754c..0000000000000 --- a/changelog/15638.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -vault: Fix a bug where duplicate policies could be added to an identity group. 
-``` \ No newline at end of file diff --git a/changelog/15681.txt b/changelog/15681.txt deleted file mode 100644 index 2054411503d8f..0000000000000 --- a/changelog/15681.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear -``` \ No newline at end of file diff --git a/changelog/15735.txt b/changelog/15735.txt deleted file mode 100644 index 3dd6600e3c9b8..0000000000000 --- a/changelog/15735.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read -``` diff --git a/changelog/15742.txt b/changelog/15742.txt deleted file mode 100644 index c7f69b6f70b90..0000000000000 --- a/changelog/15742.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). -``` diff --git a/changelog/15809.txt b/changelog/15809.txt deleted file mode 100644 index 87e42c82e679a..0000000000000 --- a/changelog/15809.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/nomad: allow reading CA and client auth certificate from /nomad/config/access -``` diff --git a/changelog/15835.txt b/changelog/15835.txt deleted file mode 100644 index d689c2a38ca65..0000000000000 --- a/changelog/15835.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths -``` diff --git a/changelog/15852.txt b/changelog/15852.txt deleted file mode 100644 index 8ed97dcc30298..0000000000000 --- a/changelog/15852.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. 
-``` \ No newline at end of file diff --git a/changelog/15866.txt b/changelog/15866.txt deleted file mode 100644 index 384762e11af5b..0000000000000 --- a/changelog/15866.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. -``` \ No newline at end of file diff --git a/changelog/15898.txt b/changelog/15898.txt deleted file mode 100644 index 02e380d2fcf03..0000000000000 --- a/changelog/15898.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Removed deprecated version of core-js 2.6.11 -``` \ No newline at end of file diff --git a/changelog/15900.txt b/changelog/15900.txt deleted file mode 100644 index ec1e8b66ce97b..0000000000000 --- a/changelog/15900.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Fixes parsing boolean values for ha_storage backends in config -``` \ No newline at end of file diff --git a/changelog/15912.txt b/changelog/15912.txt deleted file mode 100644 index 391d7353b6280..0000000000000 --- a/changelog/15912.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 -``` diff --git a/changelog/15989.txt b/changelog/15989.txt deleted file mode 100644 index 68ad2789f1b00..0000000000000 --- a/changelog/15989.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/quotas: Added ability to add path suffixes for rate-limit resource quotas -``` \ No newline at end of file diff --git a/changelog/15996.txt b/changelog/15996.txt deleted file mode 100644 index b29f1da19fae1..0000000000000 --- a/changelog/15996.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). 
-``` diff --git a/changelog/15998.txt b/changelog/15998.txt deleted file mode 100644 index 69274f6c3ff4b..0000000000000 --- a/changelog/15998.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -ui: UI support for Okta Number Challenge. -``` diff --git a/changelog/16000.txt b/changelog/16000.txt deleted file mode 100644 index fde39b9598823..0000000000000 --- a/changelog/16000.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Limit activity log client count usage by namespaces -``` \ No newline at end of file diff --git a/changelog/16056.txt b/changelog/16056.txt deleted file mode 100644 index 1652726ce5879..0000000000000 --- a/changelog/16056.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. -``` diff --git a/changelog/16063.txt b/changelog/16063.txt deleted file mode 100644 index aa90becd6170e..0000000000000 --- a/changelog/16063.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -website/docs: Update replication docs to mention Integrated Storage -``` - diff --git a/changelog/16115.txt b/changelog/16115.txt deleted file mode 100644 index 82998b6568493..0000000000000 --- a/changelog/16115.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role -``` \ No newline at end of file diff --git a/changelog/16124.txt b/changelog/16124.txt deleted file mode 100644 index 38eca2af9e402..0000000000000 --- a/changelog/16124.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints -``` diff --git a/changelog/16140.txt b/changelog/16140.txt deleted file mode 100644 index 1fffd7ef82e39..0000000000000 --- a/changelog/16140.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ad: set config default 
length only if password_policy is missing -``` diff --git a/changelog/16146.txt b/changelog/16146.txt deleted file mode 100644 index 39086b3b044f4..0000000000000 --- a/changelog/16146.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/activity: generate hyperloglogs containing clientIds for each month during precomputation -``` \ No newline at end of file diff --git a/changelog/16162.txt b/changelog/16162.txt deleted file mode 100644 index 5e3c348eae462..0000000000000 --- a/changelog/16162.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified -``` \ No newline at end of file diff --git a/changelog/16181.txt b/changelog/16181.txt deleted file mode 100644 index 1e97d1e15c25d..0000000000000 --- a/changelog/16181.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -identity/oidc: allows filtering the list providers response by an allowed_client_id -``` diff --git a/changelog/16184.txt b/changelog/16184.txt deleted file mode 100644 index e7a8b065e3980..0000000000000 --- a/changelog/16184.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core/activity: use monthly hyperloglogs to calculate new clients approximation for current month -``` \ No newline at end of file diff --git a/changelog/16213.txt b/changelog/16213.txt deleted file mode 100644 index 489243dda48a3..0000000000000 --- a/changelog/16213.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -website/docs: API generate-recovery-token documentation. -``` diff --git a/changelog/16224.txt b/changelog/16224.txt deleted file mode 100644 index 822b24504df59..0000000000000 --- a/changelog/16224.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: fix GPG encryption to support subkeys. 
-``` diff --git a/changelog/16249.txt b/changelog/16249.txt deleted file mode 100644 index f84977d047b7a..0000000000000 --- a/changelog/16249.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. -``` diff --git a/changelog/16274.txt b/changelog/16274.txt deleted file mode 100644 index 75374ed95d89b..0000000000000 --- a/changelog/16274.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. -``` diff --git a/changelog/16351.txt b/changelog/16351.txt deleted file mode 100644 index 879c7f65be827..0000000000000 --- a/changelog/16351.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/ssh: Allow the use of Identity templates in the `default_user` field -``` diff --git a/changelog/16353.txt b/changelog/16353.txt deleted file mode 100644 index be247cc9f0394..0000000000000 --- a/changelog/16353.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: remove gox -``` \ No newline at end of file diff --git a/changelog/16379.txt b/changelog/16379.txt deleted file mode 100644 index 99ed7e5ece740..0000000000000 --- a/changelog/16379.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. 
-``` \ No newline at end of file diff --git a/changelog/16386.txt b/changelog/16386.txt deleted file mode 100644 index 4fa6a6ca6649b..0000000000000 --- a/changelog/16386.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core/quotas: Added globbing functionality on the end of path suffix quota paths -``` diff --git a/changelog/16409.txt b/changelog/16409.txt deleted file mode 100644 index d8f83b029d026..0000000000000 --- a/changelog/16409.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -command/audit: Improve missing type error message -``` diff --git a/changelog/16421.txt b/changelog/16421.txt deleted file mode 100644 index 281d2e8717346..0000000000000 --- a/changelog/16421.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. -``` diff --git a/changelog/16435.txt b/changelog/16435.txt deleted file mode 100644 index a7246e31eedcd..0000000000000 --- a/changelog/16435.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/gcp: Add support for GCE regional instance groups -``` \ No newline at end of file diff --git a/changelog/16441.txt b/changelog/16441.txt deleted file mode 100644 index f265483d86ea2..0000000000000 --- a/changelog/16441.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. 
-``` diff --git a/changelog/16455.txt b/changelog/16455.txt deleted file mode 100644 index 660dbc10588b3..0000000000000 --- a/changelog/16455.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 -``` diff --git a/changelog/16487.txt b/changelog/16487.txt deleted file mode 100644 index cbf2a2a586fef..0000000000000 --- a/changelog/16487.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -identity: Prevent possibility of data races on entity creation. -``` diff --git a/changelog/16489.txt b/changelog/16489.txt deleted file mode 100644 index 17c66ca9f795f..0000000000000 --- a/changelog/16489.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. -``` diff --git a/changelog/16494.txt b/changelog/16494.txt deleted file mode 100644 index 40cf3643ad0c0..0000000000000 --- a/changelog/16494.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. -``` diff --git a/changelog/16519.txt b/changelog/16519.txt deleted file mode 100644 index 1325202e62648..0000000000000 --- a/changelog/16519.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs -``` diff --git a/changelog/16523.txt b/changelog/16523.txt new file mode 100644 index 0000000000000..0e7350a09f679 --- /dev/null +++ b/changelog/16523.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. 
+``` diff --git a/changelog/16525.txt b/changelog/16525.txt deleted file mode 100644 index 2f611afc5c86c..0000000000000 --- a/changelog/16525.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. -``` -```release-note:improvement -auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. -``` diff --git a/changelog/16526.txt b/changelog/16526.txt new file mode 100644 index 0000000000000..50051731e914a --- /dev/null +++ b/changelog/16526.txt @@ -0,0 +1,3 @@ +```release-note:bug +database/elasticsearch: Fixes a bug in boolean parsing for initialize +``` diff --git a/changelog/16539.txt b/changelog/16539.txt deleted file mode 100644 index 9927329b5728a..0000000000000 --- a/changelog/16539.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted -``` diff --git a/changelog/16549.txt b/changelog/16549.txt deleted file mode 100644 index 101d1f924441f..0000000000000 --- a/changelog/16549.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. 
-``` \ No newline at end of file diff --git a/changelog/16553.txt b/changelog/16553.txt deleted file mode 100644 index 7031f04a16155..0000000000000 --- a/changelog/16553.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -command: Fix shell completion for KV v2 mounts -``` diff --git a/changelog/16563.txt b/changelog/16563.txt deleted file mode 100644 index e5ff2758adab3..0000000000000 --- a/changelog/16563.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs -``` diff --git a/changelog/16564.txt b/changelog/16564.txt deleted file mode 100644 index 90a5524d81688..0000000000000 --- a/changelog/16564.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). -``` diff --git a/changelog/16566.txt b/changelog/16566.txt deleted file mode 100644 index 269d8dafb0695..0000000000000 --- a/changelog/16566.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow revocation via proving possession of certificate's private key -``` diff --git a/changelog/16567.txt b/changelog/16567.txt deleted file mode 100644 index 78492e304549c..0000000000000 --- a/changelog/16567.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -identity/oidc: Adds support for detailed listing of clients and providers. 
-``` diff --git a/changelog/16609.txt b/changelog/16609.txt deleted file mode 100644 index 13ecb7bbcdb56..0000000000000 --- a/changelog/16609.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Upgrade github.com/hashicorp/raft -``` \ No newline at end of file diff --git a/changelog/16621.txt b/changelog/16621.txt deleted file mode 100644 index e447dbb6a824d..0000000000000 --- a/changelog/16621.txt +++ /dev/null @@ -1,2 +0,0 @@ -```release-note:improvement -secrets/pki: Allow revocation of issuers within the same mount. diff --git a/changelog/16631.txt b/changelog/16631.txt deleted file mode 100644 index 4e092b360e8b9..0000000000000 --- a/changelog/16631.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/database/hana: Add ability to customize dynamic usernames -``` \ No newline at end of file diff --git a/changelog/16668.txt b/changelog/16668.txt deleted file mode 100644 index 745cf0beb73c4..0000000000000 --- a/changelog/16668.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/transit: Add a dedicated HMAC key type, which can be used with key import. -``` \ No newline at end of file diff --git a/changelog/16676.txt b/changelog/16676.txt deleted file mode 100644 index e52c08614db2d..0000000000000 --- a/changelog/16676.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. -``` diff --git a/changelog/16688.txt b/changelog/16688.txt deleted file mode 100644 index ce192d5f220f8..0000000000000 --- a/changelog/16688.txt +++ /dev/null @@ -1,9 +0,0 @@ -```release-note:change -plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. 
-``` -```release-note:change -plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. -``` -```release-note:improvement -plugins: Plugin catalog supports registering and managing plugins with semantic version information. -``` diff --git a/changelog/16699.txt b/changelog/16699.txt deleted file mode 100644 index 4a96e868a191f..0000000000000 --- a/changelog/16699.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: Add a sentinel error for missing KV secrets -``` diff --git a/changelog/16702.txt b/changelog/16702.txt deleted file mode 100644 index a19764675cb9b..0000000000000 --- a/changelog/16702.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. -``` diff --git a/changelog/16714.txt b/changelog/16714.txt deleted file mode 100644 index a13610699aaec..0000000000000 --- a/changelog/16714.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -debug: Remove extra empty lines from vault.log when debug command is run -``` \ No newline at end of file diff --git a/changelog/16723.txt b/changelog/16723.txt deleted file mode 100644 index faba4594c049c..0000000000000 --- a/changelog/16723.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:feature -**OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for -a specific cluster's revoked certificates in a mount. 
-``` diff --git a/changelog/16762.txt b/changelog/16762.txt deleted file mode 100644 index ade57bd4dc7c2..0000000000000 --- a/changelog/16762.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add ability to periodically rebuild CRL before expiry -``` diff --git a/changelog/16773.txt b/changelog/16773.txt deleted file mode 100644 index bebb0d572c56a..0000000000000 --- a/changelog/16773.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. -``` diff --git a/changelog/16821.txt b/changelog/16821.txt deleted file mode 100644 index c414d801575bb..0000000000000 --- a/changelog/16821.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: redirect_to param forwards from auth route when authenticated -``` \ No newline at end of file diff --git a/changelog/16846.txt b/changelog/16846.txt deleted file mode 100644 index dd4aeaa790879..0000000000000 --- a/changelog/16846.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -plugins: Add Deprecation Status method to builtinregistry. -``` diff --git a/changelog/16849.txt b/changelog/16849.txt deleted file mode 100644 index a320eedcdf4df..0000000000000 --- a/changelog/16849.txt +++ /dev/null @@ -1,15 +0,0 @@ -```release-note:change -auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. -``` -```release-note:change -auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. -``` -```release-note:change -secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. -``` -```release-note:change -secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. 
-``` -```release-note:improvement -cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. -``` diff --git a/changelog/16856.txt b/changelog/16856.txt deleted file mode 100644 index 512dd67a76b1c..0000000000000 --- a/changelog/16856.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -plugins: Add plugin version to auth register, list, and mount table -``` diff --git a/changelog/16865.txt b/changelog/16865.txt deleted file mode 100644 index 2f03b83de4447..0000000000000 --- a/changelog/16865.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers -``` diff --git a/changelog/16871.txt b/changelog/16871.txt deleted file mode 100644 index 8b57c78e8e2de..0000000000000 --- a/changelog/16871.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance -``` diff --git a/changelog/16874.txt b/changelog/16874.txt deleted file mode 100644 index f1dafa01738a5..0000000000000 --- a/changelog/16874.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. -``` diff --git a/changelog/16900.txt b/changelog/16900.txt deleted file mode 100644 index 35e2b5a964de5..0000000000000 --- a/changelog/16900.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. 
-``` diff --git a/changelog/16911.txt b/changelog/16911.txt deleted file mode 100644 index a451f690df57c..0000000000000 --- a/changelog/16911.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api/mfa: Add namespace path to the MFA read/list endpoint -``` diff --git a/changelog/16935.txt b/changelog/16935.txt deleted file mode 100644 index 0b0b46fd14b89..0000000000000 --- a/changelog/16935.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field -``` diff --git a/changelog/16950.txt b/changelog/16950.txt deleted file mode 100644 index 0ee2d5bdc65ec..0000000000000 --- a/changelog/16950.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period -``` diff --git a/changelog/16958.txt b/changelog/16958.txt deleted file mode 100644 index a77af9a7b5687..0000000000000 --- a/changelog/16958.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. -``` diff --git a/changelog/16959.txt b/changelog/16959.txt deleted file mode 100644 index aabfac3cae36c..0000000000000 --- a/changelog/16959.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -ui: adds HCP link status banner -``` \ No newline at end of file diff --git a/changelog/16970.txt b/changelog/16970.txt deleted file mode 100644 index 0f0a9f8d60363..0000000000000 --- a/changelog/16970.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -agent: Agent will now respect `max_retries` retry configuration even when caching is set. 
-``` diff --git a/changelog/16972.txt b/changelog/16972.txt deleted file mode 100644 index 3aec66adfa64b..0000000000000 --- a/changelog/16972.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -plugins: Added environment variable flag to opt-out specific plugins from multiplexing -``` \ No newline at end of file diff --git a/changelog/16995.txt b/changelog/16995.txt deleted file mode 100644 index c1adc9060b4e2..0000000000000 --- a/changelog/16995.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins -``` \ No newline at end of file diff --git a/changelog/17005.txt b/changelog/17005.txt deleted file mode 100644 index 04273d956128f..0000000000000 --- a/changelog/17005.txt +++ /dev/null @@ -1,13 +0,0 @@ -```release-note:change -auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint -reports an error for `Pending Removal` auth methods. -``` -```release-note:change -secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint -reports an error for `Pending Removal` secrets engines. -``` -```release-note:improvement -core: Handle and log deprecated builtin mounts. Introduces -`VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when -attempting to mount `Pending Removal` builtin plugins. -``` diff --git a/changelog/17028.txt b/changelog/17028.txt deleted file mode 100644 index fd49440448707..0000000000000 --- a/changelog/17028.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -core: Activity log goroutine management improvements to allow tests to be more deterministic. -``` \ No newline at end of file diff --git a/changelog/17038.txt b/changelog/17038.txt deleted file mode 100644 index f6451dc99622b..0000000000000 --- a/changelog/17038.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. 
-``` diff --git a/changelog/17045.txt b/changelog/17045.txt deleted file mode 100644 index 600641dbe00b2..0000000000000 --- a/changelog/17045.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. -``` diff --git a/changelog/17058.txt b/changelog/17058.txt deleted file mode 100644 index fd527cc8e748f..0000000000000 --- a/changelog/17058.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:change -auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. -``` -```release-note:change -secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. -``` diff --git a/changelog/17070.txt b/changelog/17070.txt deleted file mode 100644 index 1b45fd6ba34dc..0000000000000 --- a/changelog/17070.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:feature -**Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user -roles and root credential rotation on a stand alone Redis server. -``` \ No newline at end of file diff --git a/changelog/17071.txt b/changelog/17071.txt deleted file mode 100644 index 926ca839a7b8c..0000000000000 --- a/changelog/17071.txt +++ /dev/null @@ -1,2 +0,0 @@ -```release-note:feature -**UI OIDC Provider Config**: Adds configuration of Vault as an OIDC identity provider, and offer Vault’s various authentication methods and source of identity to any client applications. 
\ No newline at end of file diff --git a/changelog/17073.txt b/changelog/17073.txt deleted file mode 100644 index 96ab50bbc5ccc..0000000000000 --- a/changelog/17073.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL -``` \ No newline at end of file diff --git a/changelog/17075.txt b/changelog/17075.txt deleted file mode 100644 index 1b122e5c0a656..0000000000000 --- a/changelog/17075.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -**Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. -``` \ No newline at end of file diff --git a/changelog/17077.txt b/changelog/17077.txt deleted file mode 100644 index 0f5d1f8cc2f1a..0000000000000 --- a/changelog/17077.txt +++ /dev/null @@ -1,12 +0,0 @@ -```release-note:change -plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. -``` -```release-note:change -plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. -``` -```release-note:change -plugins: `plugin list` now accepts a `-detailed` flag, which display deprecation status and version info. -``` -```release-note:change -plugins: `plugin info` displays deprecation status for builtin plugins. 
-``` diff --git a/changelog/17088.txt b/changelog/17088.txt deleted file mode 100644 index dfd08c9a20c87..0000000000000 --- a/changelog/17088.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -plugins: Adding version to plugin GRPC interface -``` \ No newline at end of file diff --git a/changelog/17116.txt b/changelog/17116.txt deleted file mode 100644 index 73116ee37cf48..0000000000000 --- a/changelog/17116.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Replaces non-inclusive terms -``` \ No newline at end of file diff --git a/changelog/17118.txt b/changelog/17118.txt deleted file mode 100644 index 76c7748f02828..0000000000000 --- a/changelog/17118.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to -override the HTTP response code in case of partial user-input failures. -``` \ No newline at end of file diff --git a/changelog/17136.txt b/changelog/17136.txt deleted file mode 100644 index e5e9744c52c00..0000000000000 --- a/changelog/17136.txt +++ /dev/null @@ -1,5 +0,0 @@ -```release-note:improvement -auth/cert: Operators can now specify a CRL distribution point URL, in which -case the cert auth engine will fetch and use the CRL from that location -rather than needing to push CRLs directly to auth/cert. 
-``` \ No newline at end of file diff --git a/changelog/17139.txt b/changelog/17139.txt deleted file mode 100644 index 81b7507f26e3b..0000000000000 --- a/changelog/17139.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated -``` -```release-note:improvement -website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc -``` \ No newline at end of file diff --git a/changelog/17152.txt b/changelog/17152.txt deleted file mode 100644 index fe5fee71498d1..0000000000000 --- a/changelog/17152.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:feature -**LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out -functionality for all supported schemas. -``` \ No newline at end of file diff --git a/changelog/17153.txt b/changelog/17153.txt deleted file mode 100644 index 00fe707908eb3..0000000000000 --- a/changelog/17153.txt +++ /dev/null @@ -1,2 +0,0 @@ -```release-note:improvement -ui: add 'disable' param to pki crl configuration \ No newline at end of file diff --git a/changelog/17159.txt b/changelog/17159.txt deleted file mode 100644 index b480f81156215..0000000000000 --- a/changelog/17159.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/database/snowflake: Add multiplexing support -``` diff --git a/changelog/17160.txt b/changelog/17160.txt deleted file mode 100644 index 36b6d07dfd09e..0000000000000 --- a/changelog/17160.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. 
-``` \ No newline at end of file diff --git a/changelog/17161.txt b/changelog/17162.txt similarity index 51% rename from changelog/17161.txt rename to changelog/17162.txt index d23421ae97c73..5598b9bfc59c7 100644 --- a/changelog/17161.txt +++ b/changelog/17162.txt @@ -1,7 +1,3 @@ -```release-note:improvement -auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] -``` - ```release-note:bug auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] ``` diff --git a/changelog/17164.txt b/changelog/17164.txt deleted file mode 100644 index e09797d0d8529..0000000000000 --- a/changelog/17164.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/kubernetes: upgrade to v0.2.0 -``` diff --git a/changelog/17174.txt b/changelog/17174.txt deleted file mode 100644 index 302c99a4b5854..0000000000000 --- a/changelog/17174.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. -``` \ No newline at end of file diff --git a/changelog/17180.txt b/changelog/17180.txt deleted file mode 100644 index 4e1e5d96d1001..0000000000000 --- a/changelog/17180.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -secrets/azure: Removed deprecated AAD graph API support from the secrets engine. 
-``` \ No newline at end of file diff --git a/changelog/17187.txt b/changelog/17187.txt deleted file mode 100644 index 71476ef3169e6..0000000000000 --- a/changelog/17187.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Refactor lock grabbing code to simplify stateLock deadlock investigations -``` \ No newline at end of file diff --git a/changelog/17194.txt b/changelog/17194.txt deleted file mode 100644 index a3ea955d05471..0000000000000 --- a/changelog/17194.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. -``` \ No newline at end of file diff --git a/changelog/17196.txt b/changelog/17196.txt deleted file mode 100644 index 586205b94c57f..0000000000000 --- a/changelog/17196.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/cf: Enables CF roles to be compatible with Vault's role based quotas. -``` \ No newline at end of file diff --git a/changelog/17199.txt b/changelog/17199.txt deleted file mode 100644 index e9600455b2cdd..0000000000000 --- a/changelog/17199.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. -``` diff --git a/changelog/17204.txt b/changelog/17204.txt deleted file mode 100644 index 07ea8c02e7af7..0000000000000 --- a/changelog/17204.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -core: Fix panic when the plugin catalog returns neither a plugin nor an error. -``` diff --git a/changelog/17212.txt b/changelog/17212.txt deleted file mode 100644 index 1130ca68194f9..0000000000000 --- a/changelog/17212.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/oci: Add support for role resolution. 
-``` diff --git a/changelog/17251.txt b/changelog/17251.txt deleted file mode 100644 index 2849ab0bcf9f8..0000000000000 --- a/changelog/17251.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. -``` diff --git a/changelog/9081.txt b/changelog/9081.txt deleted file mode 100644 index 03045e49dfff2..0000000000000 --- a/changelog/9081.txt +++ /dev/null @@ -1,4 +0,0 @@ - -```release-note:improvement -website/docs: changed to echo for all string examples instead of (<<<) here-string. -``` diff --git a/changelog/README.md b/changelog/README.md index cbf841f6c77b7..cf49db4264d08 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,56 +1,3 @@ # changelog -This folder holds changelog updates from commit 3bc7d15 onwards. - -Release notes are text files with three lines: - - 1. An opening code block with the `release-note:` type annotation. - - For example: - - ```release-note:bug - - Valid modes are: - - - `bug` - Any sort of non-security defect fix. - - `change` - A change in the product that may require action or - review by the operator. Examples would be any kind of API change - (as opposed to backwards compatible addition), a notable behavior - change, or anything that might require attention before updating. Go - version changes are also listed here since they can potentially have - large, sometimes unknown impacts. (Go updates are a special case, and - dep updates in general aren't a `change`). Discussion of any potential - `change` items in the pull request to see what other communication - might be warranted. - - `deprecation` - Announcement of a planned future removal of a - feature. Only use this if a deprecation notice also exists [in the - docs](https://www.vaultproject.io/docs/deprecation). - - `feature` - Large topical additions for a major release. These are - rarely in minor releases. 
Formatting for `feature` entries differs - from normal changelog formatting - see the [new features - instructions](#new-and-major-features). - - `improvement` - Most updates to the product that aren’t `bug`s, but - aren't big enough to be a `feature`, will be an `improvement`. - - 2. A component (for example, `secret/pki` or `sdk/framework` or), a colon and a space, and then a one-line description of the change. - - 3. An ending code block. - -This should be in a file named after the pull request number (e.g., `12345.txt`). - -There are many examples in this folder; check one out if you're stuck! - -See [hashicorp/go-changelog](https://github.com/hashicorp/go-changelog) for full documentation on the supported entries. - -## New and Major Features - -For features we are introducing in a new major release, we prefer a single -changelog entry representing that feature. This way, it is clear to readers -what feature is being introduced. You do not need to reference a specific PR, -and the formatting is slightly different - your changelog file should look -like: - - changelog/.txt: - ```release-note:feature - **Feature Name**: Description of feature - for example "Custom password policies are now supported for all database engines." - ``` +This folder holds changelog updates from commit 3bc7d15 onwards. See [hashicorp/go-changelog](https://github.com/hashicorp/go-changelog) for full documentation on the supported entries. diff --git a/changelog/_go-ver-1111.txt b/changelog/_go-ver-1111.txt new file mode 100644 index 0000000000000..c9e146855741c --- /dev/null +++ b/changelog/_go-ver-1111.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.17.12. +``` diff --git a/changelog/_go-ver-1113.txt b/changelog/_go-ver-1113.txt new file mode 100644 index 0000000000000..a749401955d7b --- /dev/null +++ b/changelog/_go-ver-1113.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.17.13. 
+``` diff --git a/changelog/_go-ver-1120.txt b/changelog/_go-ver-1120.txt deleted file mode 100644 index 6cb60f6055670..0000000000000 --- a/changelog/_go-ver-1120.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:change -core: Bump Go version to 1.19.1. -``` diff --git a/changelog/go-ver-1110.txt b/changelog/go-ver-1110.txt index 7f43e9fd544e8..5a5112885f087 100644 --- a/changelog/go-ver-1110.txt +++ b/changelog/go-ver-1110.txt @@ -1,3 +1,3 @@ ```release-note:change -core: Bump Go version to 1.17.9. +core: Bump Go version to 1.17.11. ``` diff --git a/command/agent.go b/command/agent.go index b90665683cc1c..1116707419c01 100644 --- a/command/agent.go +++ b/command/agent.go @@ -594,7 +594,7 @@ func (c *AgentCommand) Run(args []string) int { c.UI.Warn(fmt.Sprintf("Failed to close persistent cache file after getting retrieval token: %s", err)) } - km, err := keymanager.NewPassthroughKeyManager(ctx, token) + km, err := keymanager.NewPassthroughKeyManager(token) if err != nil { c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) return 1 @@ -658,7 +658,7 @@ func (c *AgentCommand) Run(args []string) int { } } } else { - km, err := keymanager.NewPassthroughKeyManager(ctx, nil) + km, err := keymanager.NewPassthroughKeyManager(nil) if err != nil { c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) return 1 @@ -676,7 +676,7 @@ func (c *AgentCommand) Run(args []string) int { cacheLogger.Info("configured persistent storage", "path", config.Cache.Persist.Path) // Stash the key material in bolt - token, err := km.RetrievalToken(ctx) + token, err := km.RetrievalToken() if err != nil { c.UI.Error(fmt.Sprintf("Error getting persistent key: %s", err)) return 1 @@ -970,7 +970,7 @@ func verifyRequestHeader(handler http.Handler) http.Handler { if val, ok := r.Header[consts.RequestHeaderName]; !ok || len(val) != 1 || val[0] != "true" { logical.RespondError(w, http.StatusPreconditionFailed, - fmt.Errorf("missing 
%q header", consts.RequestHeaderName)) + fmt.Errorf("missing '%s' header", consts.RequestHeaderName)) return } diff --git a/command/agent/alicloud_end_to_end_test.go b/command/agent/alicloud_end_to_end_test.go index 948f9fa5accf4..1684ecae4ad71 100644 --- a/command/agent/alicloud_end_to_end_test.go +++ b/command/agent/alicloud_end_to_end_test.go @@ -190,7 +190,7 @@ func setAliCloudEnvCreds() error { } assumeRoleReq := sts.CreateAssumeRoleRequest() assumeRoleReq.RoleArn = os.Getenv(envVarAlicloudRoleArn) - assumeRoleReq.RoleSessionName = strings.ReplaceAll(roleSessionName, "-", "") + assumeRoleReq.RoleSessionName = strings.Replace(roleSessionName, "-", "", -1) assumeRoleResp, err := client.AssumeRole(assumeRoleReq) if err != nil { return err diff --git a/command/agent/auth/alicloud/alicloud.go b/command/agent/auth/alicloud/alicloud.go index 6fc640c290e0a..ff9a4341f2afb 100644 --- a/command/agent/auth/alicloud/alicloud.go +++ b/command/agent/auth/alicloud/alicloud.go @@ -18,11 +18,13 @@ import ( ) /* -Creds can be inferred from instance metadata, and those creds -expire every 60 minutes, so we're going to need to poll for new -creds. Since we're polling anyways, let's poll once a minute so -all changes can be picked up rather quickly. This is configurable, -however. + + Creds can be inferred from instance metadata, and those creds + expire every 60 minutes, so we're going to need to poll for new + creds. Since we're polling anyways, let's poll once a minute so + all changes can be picked up rather quickly. This is configurable, + however. 
+ */ const defaultCredCheckFreqSeconds = 60 diff --git a/command/agent/auth/jwt/jwt.go b/command/agent/auth/jwt/jwt.go index 8f088eb199e5e..0c97bee905ec3 100644 --- a/command/agent/auth/jwt/jwt.go +++ b/command/agent/auth/jwt/jwt.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io/fs" + "io/ioutil" "net/http" "os" "sync" @@ -14,23 +15,21 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/sdk/helper/parseutil" ) type jwtMethod struct { - logger hclog.Logger - path string - mountPath string - role string - removeJWTAfterReading bool - credsFound chan struct{} - watchCh chan string - stopCh chan struct{} - doneCh chan struct{} - credSuccessGate chan struct{} - ticker *time.Ticker - once *sync.Once - latestToken *atomic.Value + logger hclog.Logger + path string + mountPath string + role string + credsFound chan struct{} + watchCh chan string + stopCh chan struct{} + doneCh chan struct{} + credSuccessGate chan struct{} + ticker *time.Ticker + once *sync.Once + latestToken *atomic.Value } // NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod @@ -44,16 +43,15 @@ func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { } j := &jwtMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - removeJWTAfterReading: true, - credsFound: make(chan struct{}), - watchCh: make(chan string), - stopCh: make(chan struct{}), - doneCh: make(chan struct{}), - credSuccessGate: make(chan struct{}), - once: new(sync.Once), - latestToken: new(atomic.Value), + logger: conf.Logger, + mountPath: conf.MountPath, + credsFound: make(chan struct{}), + watchCh: make(chan string), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + credSuccessGate: make(chan struct{}), + once: new(sync.Once), + latestToken: new(atomic.Value), } j.latestToken.Store("") @@ -75,14 +73,6 @@ func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { return 
nil, errors.New("could not convert 'role' config value to string") } - if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { - removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) - if err != nil { - return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) - } - j.removeJWTAfterReading = removeJWTAfterReading - } - switch { case j.path == "": return nil, errors.New("'path' value is empty") @@ -90,14 +80,7 @@ func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { return nil, errors.New("'role' value is empty") } - // If we don't delete the JWT after reading, use a slower reload period, - // otherwise we would re-read the whole file every 500ms, instead of just - // doing a stat on the file every 500ms. - readPeriod := 1 * time.Minute - if j.removeJWTAfterReading { - readPeriod = 500 * time.Millisecond - } - j.ticker = time.NewTicker(readPeriod) + j.ticker = time.NewTicker(500 * time.Millisecond) go j.runWatcher() @@ -162,7 +145,6 @@ func (j *jwtMethod) runWatcher() { j.ingressToken() newToken := j.latestToken.Load().(string) if newToken != latestToken { - j.logger.Debug("new jwt file found") j.credsFound <- struct{}{} } } @@ -179,9 +161,11 @@ func (j *jwtMethod) ingressToken() { return } + j.logger.Debug("new jwt file found") + // Check that the path refers to a file. // If it's a symlink, it could still be a symlink to a directory, - // but os.ReadFile below will return a descriptive error. + // but ioutil.ReadFile below will return a descriptive error. 
switch mode := fi.Mode(); { case mode.IsRegular(): // regular file @@ -192,7 +176,7 @@ func (j *jwtMethod) ingressToken() { return } - token, err := os.ReadFile(j.path) + token, err := ioutil.ReadFile(j.path) if err != nil { j.logger.Error("failed to read jwt file", "error", err) return @@ -206,9 +190,7 @@ func (j *jwtMethod) ingressToken() { j.latestToken.Store(string(token)) } - if j.removeJWTAfterReading { - if err := os.Remove(j.path); err != nil { - j.logger.Error("error removing jwt file", "error", err) - } + if err := os.Remove(j.path); err != nil { + j.logger.Error("error removing jwt file", "error", err) } } diff --git a/command/agent/auth/jwt/jwt_test.go b/command/agent/auth/jwt/jwt_test.go index 8e9a2ae86c136..f912006b15130 100644 --- a/command/agent/auth/jwt/jwt_test.go +++ b/command/agent/auth/jwt/jwt_test.go @@ -2,6 +2,7 @@ package jwt import ( "bytes" + "io/ioutil" "os" "path" "strings" @@ -9,7 +10,6 @@ import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" ) func TestIngressToken(t *testing.T) { @@ -21,18 +21,18 @@ func TestIngressToken(t *testing.T) { symlinked = "symlinked" ) - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + rootDir, err := ioutil.TempDir("", "vault-agent-jwt-auth-test") if err != nil { t.Fatalf("failed to create temp dir: %s", err) } defer os.RemoveAll(rootDir) setupTestDir := func() string { - testDir, err := os.MkdirTemp(rootDir, "") + testDir, err := ioutil.TempDir(rootDir, "") if err != nil { t.Fatal(err) } - err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) + err = ioutil.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) if err != nil { t.Fatal(err) } @@ -106,62 +106,3 @@ func TestIngressToken(t *testing.T) { } } } - -func TestDeleteAfterReading(t *testing.T) { - for _, tc := range map[string]struct { - configValue string - shouldDelete bool - }{ - "default": { - "", - true, - }, - "explicit true": { - "true", - true, - }, - "false": { - 
"false", - false, - }, - } { - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - tokenPath := path.Join(rootDir, "token") - err = os.WriteFile(tokenPath, []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - - config := &auth.AuthConfig{ - Config: map[string]interface{}{ - "path": tokenPath, - "role": "unusedrole", - }, - Logger: hclog.Default(), - } - if tc.configValue != "" { - config.Config["remove_jwt_after_reading"] = tc.configValue - } - - jwtAuth, err := NewJWTAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - jwtAuth.(*jwtMethod).ingressToken() - - if _, err := os.Lstat(tokenPath); tc.shouldDelete { - if err == nil || !os.IsNotExist(err) { - t.Fatal(err) - } - } else { - if err != nil { - t.Fatal(err) - } - } - } -} diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go index 1a754e064ec16..9523d310bbc1b 100644 --- a/command/agent/cache/api_proxy.go +++ b/command/agent/cache/api_proxy.go @@ -112,7 +112,7 @@ func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, } // Make the request to Vault and get the response - ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) + ap.logger.Info("forwarding request", "method", req.Request.Method, "path", req.Request.URL.Path) resp, err := client.RawRequestWithContext(ctx, fwReq) if resp == nil && err != nil { diff --git a/command/agent/cache/cacheboltdb/bolt.go b/command/agent/cache/cacheboltdb/bolt.go index 72cb7f3b82464..c91b47a0da90b 100644 --- a/command/agent/cache/cacheboltdb/bolt.go +++ b/command/agent/cache/cacheboltdb/bolt.go @@ -10,7 +10,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/go-multierror" bolt "go.etcd.io/bbolt" ) 
@@ -228,7 +228,7 @@ func autoIncrementedLeaseKey(tx *bolt.Tx, id string) ([]byte, error) { // Set an index (token or lease) in bolt storage func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, indexType string) error { - blob, err := b.wrapper.Encrypt(ctx, plaintext, wrapping.WithAad([]byte(b.aad))) + blob, err := b.wrapper.Encrypt(ctx, plaintext, []byte(b.aad)) if err != nil { return fmt.Errorf("error encrypting %s index: %w", indexType, err) } @@ -296,12 +296,12 @@ func (b *BoltStorage) Delete(id string, indexType string) error { } func (b *BoltStorage) decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) { - var blob wrapping.BlobInfo + var blob wrapping.EncryptedBlobInfo if err := proto.Unmarshal(ciphertext, &blob); err != nil { return nil, err } - return b.wrapper.Decrypt(ctx, &blob, wrapping.WithAad([]byte(b.aad))) + return b.wrapper.Decrypt(ctx, &blob, []byte(b.aad)) } // GetByType returns a list of stored items of the specified type diff --git a/command/agent/cache/cacheboltdb/bolt_test.go b/command/agent/cache/cacheboltdb/bolt_test.go index d6f5a742ef34a..ceb621005fd0c 100644 --- a/command/agent/cache/cacheboltdb/bolt_test.go +++ b/command/agent/cache/cacheboltdb/bolt_test.go @@ -22,7 +22,7 @@ import ( func getTestKeyManager(t *testing.T) keymanager.KeyManager { t.Helper() - km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) + km, err := keymanager.NewPassthroughKeyManager(nil) require.NoError(t, err) return km @@ -286,7 +286,7 @@ func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { // Manually insert some items into the v1 schema. 
err = db.Update(func(tx *bolt.Tx) error { - blob, err := b.wrapper.Encrypt(ctx, []byte("ignored-contents")) + blob, err := b.wrapper.Encrypt(ctx, []byte("ignored-contents"), []byte("")) if err != nil { return fmt.Errorf("error encrypting contents: %w", err) } diff --git a/command/agent/cache/handler.go b/command/agent/cache/handler.go index 60f9e046a0fa1..2beea6cc213d8 100644 --- a/command/agent/cache/handler.go +++ b/command/agent/cache/handler.go @@ -54,7 +54,7 @@ func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSin resp, err := proxier.Send(ctx, req) if err != nil { - // If this is an api.Response error, don't wrap the response. + // If this is a api.Response error, don't wrap the response. if resp != nil && resp.Response.Error() != nil { copyHeader(w.Header(), resp.Response.Header) w.WriteHeader(resp.Response.StatusCode) diff --git a/command/agent/cache/keymanager/manager.go b/command/agent/cache/keymanager/manager.go index ff4d0f2c00fa8..c695986233db9 100644 --- a/command/agent/cache/keymanager/manager.go +++ b/command/agent/cache/keymanager/manager.go @@ -1,10 +1,6 @@ package keymanager -import ( - "context" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" -) +import wrapping "github.com/hashicorp/go-kms-wrapping" const ( KeyID = "root" @@ -16,5 +12,5 @@ type KeyManager interface { // RetrievalToken is the material returned which can be used to source back the // encryption key. Depending on the implementation, the token can be the // encryption key itself or a token/identifier used to exchange the token. 
- RetrievalToken(ctx context.Context) ([]byte, error) + RetrievalToken() ([]byte, error) } diff --git a/command/agent/cache/keymanager/passthrough.go b/command/agent/cache/keymanager/passthrough.go index 68a1fc221b623..447dd41504187 100644 --- a/command/agent/cache/keymanager/passthrough.go +++ b/command/agent/cache/keymanager/passthrough.go @@ -1,12 +1,11 @@ package keymanager import ( - "context" "crypto/rand" "fmt" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/go-kms-wrapping/wrappers/aead" ) var _ KeyManager = (*PassthroughKeyManager)(nil) @@ -18,7 +17,7 @@ type PassthroughKeyManager struct { // NewPassthroughKeyManager returns a new instance of the Kube encryption key. // If a key is provided, it will be used as the encryption key for the wrapper, // otherwise one will be generated. -func NewPassthroughKeyManager(ctx context.Context, key []byte) (*PassthroughKeyManager, error) { +func NewPassthroughKeyManager(key []byte) (*PassthroughKeyManager, error) { var rootKey []byte = nil switch len(key) { case 0: @@ -34,13 +33,13 @@ func NewPassthroughKeyManager(ctx context.Context, key []byte) (*PassthroughKeyM return nil, fmt.Errorf("invalid key size, should be 32, got %d", len(key)) } - wrapper := aead.NewWrapper() + wrapper := aead.NewWrapper(nil) - if _, err := wrapper.SetConfig(ctx, wrapping.WithConfigMap(map[string]string{"key_id": KeyID})); err != nil { + if _, err := wrapper.SetConfig(map[string]string{"key_id": KeyID}); err != nil { return nil, err } - if err := wrapper.SetAesGcmKeyBytes(rootKey); err != nil { + if err := wrapper.SetAESGCMKeyBytes(rootKey); err != nil { return nil, err } @@ -59,10 +58,10 @@ func (w *PassthroughKeyManager) Wrapper() wrapping.Wrapper { // RetrievalToken returns the key that was used on the wrapper since this key // manager is simply a passthrough and does not provide a mechanism to abstract 
// this key. -func (w *PassthroughKeyManager) RetrievalToken(ctx context.Context) ([]byte, error) { +func (w *PassthroughKeyManager) RetrievalToken() ([]byte, error) { if w.wrapper == nil { return nil, fmt.Errorf("unable to get wrapper for token retrieval") } - return w.wrapper.KeyBytes(ctx) + return w.wrapper.GetKeyBytes(), nil } diff --git a/command/agent/cache/keymanager/passthrough_test.go b/command/agent/cache/keymanager/passthrough_test.go index 084a71a143f26..794f15bc295a0 100644 --- a/command/agent/cache/keymanager/passthrough_test.go +++ b/command/agent/cache/keymanager/passthrough_test.go @@ -2,7 +2,6 @@ package keymanager import ( "bytes" - "context" "testing" "github.com/stretchr/testify/require" @@ -31,10 +30,9 @@ func TestKeyManager_PassthrougKeyManager(t *testing.T) { }, } - ctx := context.Background() for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - m, err := NewPassthroughKeyManager(ctx, tc.key) + m, err := NewPassthroughKeyManager(tc.key) if tc.wantErr { require.Error(t, err) return @@ -45,7 +43,7 @@ func TestKeyManager_PassthrougKeyManager(t *testing.T) { t.Fatalf("expected non-nil wrapper from the key manager") } - token, err := m.RetrievalToken(ctx) + token, err := m.RetrievalToken() if err != nil { t.Fatalf("unable to retrieve token: %s", err) } diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go index 9bc79d4a2713b..1f7224691493b 100644 --- a/command/agent/cache/lease_cache.go +++ b/command/agent/cache/lease_cache.go @@ -274,7 +274,7 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, return cachedResp, nil } - c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) + c.logger.Debug("forwarding request", "method", req.Request.Method, "path", req.Request.URL.Path) // Pass the request down and get a response resp, err := c.proxier.Send(ctx, req) diff --git a/command/agent/cache/lease_cache_test.go 
b/command/agent/cache/lease_cache_test.go index 1501fcfe56db0..4dbe23392f075 100644 --- a/command/agent/cache/lease_cache_test.go +++ b/command/agent/cache/lease_cache_test.go @@ -699,7 +699,7 @@ func TestLeaseCache_Concurrent_Cacheable(t *testing.T) { func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) { t.Helper() - km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) + km, err := keymanager.NewPassthroughKeyManager(nil) require.NoError(t, err) tempCacheDir, err = ioutil.TempDir("", "agent-cache-test") diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index 16816f85e744d..982b5e16d5e6b 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -418,9 +418,9 @@ func TestLoadConfigFile_Bad_AgentCache_InconsisentAutoAuth(t *testing.T) { } func TestLoadConfigFile_Bad_AgentCache_ForceAutoAuthNoMethod(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-cache-force-auto_auth.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") if err == nil { - t.Fatal("LoadConfig should return an error when use_auto_auth_token=force and no auto_auth section present") + t.Fatal("LoadConfig should return an error when use_auto_auth_token=true and no auto_auth section present") } } @@ -432,7 +432,7 @@ func TestLoadConfigFile_Bad_AgentCache_NoListeners(t *testing.T) { } func TestLoadConfigFile_Bad_AutoAuth_Wrapped_Multiple_Sinks(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl") + _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks") if err == nil { t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and multiple sinks defined") } diff --git a/command/agent/template/template.go b/command/agent/template/template.go index 32e9022bb78bd..1dbfc8e613b01 100644 --- 
a/command/agent/template/template.go +++ b/command/agent/template/template.go @@ -264,8 +264,10 @@ func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctc ServerName: pointerutil.StringPtr(""), } - // We need to assign something to Vault.Retry or it will use its default of 12 retries. - // This retry value will be respected regardless of if we use the cache. + // The cache does its own retry management based on sc.AgentConfig.Retry, + // so we only want to set this up for templating if we're not routing + // templating through the cache. We do need to assign something to Retry + // though or it will use its default of 12 retries. var attempts int if sc.AgentConfig.Vault != nil && sc.AgentConfig.Vault.Retry != nil { attempts = sc.AgentConfig.Vault.Retry.NumRetries @@ -273,6 +275,21 @@ func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctc // Use the cache if available or fallback to the Vault server values. if sc.AgentConfig.Cache != nil { + attempts = 0 + + // If we don't want exit on template retry failure (i.e. unlimited + // retries), let consul-template handle retry and backoff logic. + // + // Note: This is a fixed value (12) that ends up being a multiplier to + // retry.num_retires (i.e. 12 * N total retries per runner restart). + // Since we are performing retries indefinitely this base number helps + // prevent agent from spamming Vault if retry.num_retries is set to a + // low value by forcing exponential backoff to be high towards the end + // of retries during the process. 
+ if sc.AgentConfig.TemplateConfig != nil && !sc.AgentConfig.TemplateConfig.ExitOnRetryFailure { + attempts = ctconfig.DefaultRetryAttempts + } + if sc.AgentConfig.Cache.InProcDialer == nil { return nil, fmt.Errorf("missing in-process dialer configuration") } @@ -284,6 +301,7 @@ func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctc // setting it here to override the setting at the top of this function, // and to prevent the vault/http client from defaulting to https. conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") + } else if strings.HasPrefix(sc.AgentConfig.Vault.Address, "https") || sc.AgentConfig.Vault.CACert != "" { skipVerify := sc.AgentConfig.Vault.TLSSkipVerify verify := !skipVerify diff --git a/command/agent_test.go b/command/agent_test.go index c5138144ef9c0..4b62020e1dfb0 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -909,7 +909,7 @@ auto_auth { continue } if string(c) != templateRendered(i)+suffix { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + err = fmt.Errorf("expected='%s', got='%s'", templateRendered(i)+suffix, string(c)) continue } } @@ -1462,7 +1462,7 @@ template_config { continue } if string(c) != templateRendered(0) { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(0), string(c)) + err = fmt.Errorf("expected='%s', got='%s'", templateRendered(0), string(c)) continue } return nil @@ -1982,7 +1982,7 @@ vault { continue } if strings.TrimSpace(string(c)) != tc.expectTemplateRender { - err = fmt.Errorf("expected=%q, got=%q", tc.expectTemplateRender, strings.TrimSpace(string(c))) + err = fmt.Errorf("expected='%s', got='%s'", tc.expectTemplateRender, strings.TrimSpace(string(c))) continue } return nil diff --git a/command/audit_enable.go b/command/audit_enable.go index 9ed7d5d30694a..fae42394462d5 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -110,7 +110,7 @@ func (c *AuditEnableCommand) Run(args []string) int { args 
= f.Args() if len(args) < 1 { - c.UI.Error("Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.") + c.UI.Error("Missing TYPE!") return 1 } diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go index 7d19f086ad586..1f55703c27bf1 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -32,7 +32,7 @@ func TestAuditEnableCommand_Run(t *testing.T) { { "empty", nil, - "Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.", + "Missing TYPE!", 1, }, { diff --git a/command/auth_enable.go b/command/auth_enable.go index 4214bf3f70643..a23c7989f7a13 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -37,7 +37,6 @@ type AuthEnableCommand struct { flagExternalEntropyAccess bool flagTokenType string flagVersion int - flagPluginVersion string } func (c *AuthEnableCommand) Synopsis() string { @@ -200,13 +199,6 @@ func (c *AuthEnableCommand) Flags() *FlagSets { Usage: "Select the version of the auth method to run. 
Not supported by all auth methods.", }) - f.StringVar(&StringVar{ - Name: "plugin-version", - Target: &c.flagPluginVersion, - Default: "", - Usage: "Select the semantic version of the plugin to enable.", - }) - return set } @@ -270,7 +262,6 @@ func (c *AuthEnableCommand) Run(args []string) int { authOpts := &api.EnableAuthOptions{ Type: authType, - PluginVersion: c.flagPluginVersion, Description: c.flagDescription, Local: c.flagLocal, SealWrap: c.flagSealWrap, diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go index 0ba9621e348ab..0cc125fc97563 100644 --- a/command/auth_enable_test.go +++ b/command/auth_enable_test.go @@ -2,7 +2,6 @@ package command import ( "io/ioutil" - "os" "strings" "testing" @@ -50,18 +49,6 @@ func TestAuthEnableCommand_Run(t *testing.T) { "", 2, }, - { - "deprecated builtin with standard mount", - []string{"app-id"}, - "", - 2, - }, - { - "deprecated builtin with different mount", - []string{"-path=/tmp", "app-id"}, - "", - 2, - }, } for _, tc := range cases { @@ -224,16 +211,6 @@ func TestAuthEnableCommand_Run(t *testing.T) { } for _, b := range backends { - var expectedResult int = 0 - status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeCredential) - allowDeprecated := os.Getenv(consts.VaultAllowPendingRemovalMountsEnv) - - // Need to handle deprecated builtins specially - if (status == consts.PendingRemoval && allowDeprecated == "") || status == consts.Removed { - expectedResult = 2 - } - - // Not a builtin if b == "token" { continue } @@ -241,11 +218,11 @@ func TestAuthEnableCommand_Run(t *testing.T) { ui, cmd := testAuthEnableCommand(t) cmd.client = client - actualResult := cmd.Run([]string{ + code := cmd.Run([]string{ b, }) - if actualResult != expectedResult { - t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) + if exp := 0; code != exp { + t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, 
ui.OutputWriter.String()+ui.ErrorWriter.String()) } } }) diff --git a/command/auth_list.go b/command/auth_list.go index 259886a5e996c..5dc29a8282c83 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -118,10 +118,10 @@ func (c *AuthListCommand) simpleMounts(auths map[string]*api.AuthMount) []string } sort.Strings(paths) - out := []string{"Path | Type | Accessor | Description | Version"} + out := []string{"Path | Type | Accessor | Description"} for _, path := range paths { mount := auths[path] - out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description, mount.PluginVersion)) + out = append(out, fmt.Sprintf("%s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description)) } return out @@ -145,7 +145,7 @@ func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []stri } } - out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Token Type | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Version | Deprecation Status"} + out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Token Type | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID"} for _, path := range paths { mount := auths[path] @@ -162,7 +162,7 @@ func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []stri pluginName = mount.Config.PluginName } - out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %s | %s | %t | %v | %s | %s | %s | %s | %s", + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %s | %s | %t | %v | %s | %s | %s", path, pluginName, mount.Accessor, @@ -175,8 +175,6 @@ func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []stri mount.Options, mount.Description, mount.UUID, - mount.PluginVersion, - mount.DeprecationStatus, )) } diff --git a/command/base.go b/command/base.go index c4dd042b12b75..5f6a171dcee2c 100644 --- a/command/base.go +++ b/command/base.go @@ -549,7 
+549,6 @@ type FlagSets struct { mainSet *flag.FlagSet hiddens map[string]struct{} completions complete.Flags - ui cli.Ui } // NewFlagSets creates a new flag sets. @@ -565,7 +564,6 @@ func NewFlagSets(ui cli.Ui) *FlagSets { mainSet: mainSet, hiddens: make(map[string]struct{}), completions: complete.Flags{}, - ui: ui, } } @@ -584,16 +582,8 @@ func (f *FlagSets) Completions() complete.Flags { } // Parse parses the given flags, returning any errors. -// Warnings, if any, regarding the arguments format are sent to stdout func (f *FlagSets) Parse(args []string) error { - err := f.mainSet.Parse(args) - - warnings := generateFlagWarnings(f.Args()) - if warnings != "" && Format(f.ui) == "table" { - f.ui.Warn(warnings) - } - - return err + return f.mainSet.Parse(args) } // Parsed reports whether the command-line flags have been parsed. @@ -613,10 +603,10 @@ func (f *FlagSets) Visit(fn func(*flag.Flag)) { } // Help builds custom help for this command, grouping by flag set. -func (f *FlagSets) Help() string { +func (fs *FlagSets) Help() string { var out bytes.Buffer - for _, set := range f.flagSets { + for _, set := range fs.flagSets { printFlagTitle(&out, set.name+":") set.VisitAll(func(f *flag.Flag) { // Skip any hidden flags diff --git a/command/base_helpers.go b/command/base_helpers.go index b2a9f8c7fbd28..f20cfebf71025 100644 --- a/command/base_helpers.go +++ b/command/base_helpers.go @@ -292,33 +292,3 @@ func parseFlagFile(raw string) (string, error) { return raw, nil } - -func generateFlagWarnings(args []string) string { - var trailingFlags []string - for _, arg := range args { - // "-" can be used where a file is expected to denote stdin. 
- if !strings.HasPrefix(arg, "-") || arg == "-" { - continue - } - - isGlobalFlag := false - trimmedArg, _, _ := strings.Cut(strings.TrimLeft(arg, "-"), "=") - for _, flag := range globalFlags { - if trimmedArg == flag { - isGlobalFlag = true - } - } - if isGlobalFlag { - continue - } - - trailingFlags = append(trailingFlags, arg) - } - - if len(trailingFlags) > 0 { - return fmt.Sprintf("Command flags must be provided before positional arguments. "+ - "The following arguments will not be parsed as flags: [%s]", strings.Join(trailingFlags, ",")) - } else { - return "" - } -} diff --git a/command/base_helpers_test.go b/command/base_helpers_test.go index dee93b4bf44a1..df764d36e4db7 100644 --- a/command/base_helpers_test.go +++ b/command/base_helpers_test.go @@ -5,7 +5,6 @@ import ( "io" "io/ioutil" "os" - "strings" "testing" "time" ) @@ -210,72 +209,3 @@ func TestParseFlagFile(t *testing.T) { }) } } - -func TestArgWarnings(t *testing.T) { - t.Parallel() - - cases := []struct { - args []string - expected string - }{ - { - []string{"a", "b", "c"}, - "", - }, - { - []string{"a", "-b"}, - "-b", - }, - { - []string{"a", "--b"}, - "--b", - }, - { - []string{"a-b", "-c"}, - "-c", - }, - { - []string{"a", "-b-c"}, - "-b-c", - }, - { - []string{"-a", "b"}, - "-a", - }, - { - []string{globalFlagDetailed}, - "", - }, - { - []string{"-" + globalFlagOutputCurlString + "=true"}, - "", - }, - { - []string{"--" + globalFlagFormat + "=false"}, - "", - }, - { - []string{"-x" + globalFlagDetailed}, - "-x" + globalFlagDetailed, - }, - { - []string{"--x=" + globalFlagDetailed}, - "--x=" + globalFlagDetailed, - }, - { - []string{"policy", "write", "my-policy", "-"}, - "", - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.expected, func(t *testing.T) { - warnings := generateFlagWarnings(tc.args) - if !strings.Contains(warnings, tc.expected) { - t.Fatalf("expected %s to contain %s", warnings, tc.expected) - } - }) - } -} diff --git a/command/base_predict.go 
b/command/base_predict.go index 61cbe092d61da..13959bb5bcf79 100644 --- a/command/base_predict.go +++ b/command/base_predict.go @@ -250,19 +250,9 @@ func (p *Predict) vaultPaths(includeFiles bool) complete.PredictFunc { // Trim path with potential mount var relativePath string - mountInfos, err := p.mountInfos() - if err != nil { - return nil - } - - var mountType, mountVersion string - for mount, mountInfo := range mountInfos { + for _, mount := range p.mounts() { if strings.HasPrefix(path, mount) { relativePath = strings.TrimPrefix(path, mount+"/") - mountType = mountInfo.Type - if mountInfo.Options != nil { - mountVersion = mountInfo.Options["version"] - } break } } @@ -270,7 +260,7 @@ func (p *Predict) vaultPaths(includeFiles bool) complete.PredictFunc { // Predict path or mount depending on path separator var predictions []string if strings.Contains(relativePath, "/") { - predictions = p.paths(mountType, mountVersion, path, includeFiles) + predictions = p.paths(path, includeFiles) } else { predictions = p.filter(p.mounts(), path) } @@ -298,7 +288,7 @@ func (p *Predict) vaultPaths(includeFiles bool) complete.PredictFunc { } // paths predicts all paths which start with the given path. 
-func (p *Predict) paths(mountType, mountVersion, path string, includeFiles bool) []string { +func (p *Predict) paths(path string, includeFiles bool) []string { client := p.Client() if client == nil { return nil @@ -313,7 +303,7 @@ func (p *Predict) paths(mountType, mountVersion, path string, includeFiles bool) root = root[:idx+1] } - paths := p.listPaths(buildAPIListPath(root, mountType, mountVersion)) + paths := p.listPaths(root) var predictions []string for _, p := range paths { @@ -336,22 +326,6 @@ func (p *Predict) paths(mountType, mountVersion, path string, includeFiles bool) return predictions } -func buildAPIListPath(path, mountType, mountVersion string) string { - if mountType == "kv" && mountVersion == "2" { - return toKVv2ListPath(path) - } - return path -} - -func toKVv2ListPath(path string) string { - firstSlashIdx := strings.Index(path, "/") - if firstSlashIdx < 0 { - return path - } - - return path[:firstSlashIdx] + "/metadata" + path[firstSlashIdx:] -} - // audits returns a sorted list of the audit backends for Vault server for // which the client is configured to communicate with. func (p *Predict) audits() []string { @@ -447,28 +421,16 @@ func (p *Predict) policies() []string { return policies } -// mountInfos returns a map with mount paths as keys and MountOutputs as values -// for the Vault server which the client is configured to communicate with. -// Returns error if server communication fails. -func (p *Predict) mountInfos() (map[string]*api.MountOutput, error) { +// mounts returns a sorted list of the mount paths for Vault server for +// which the client is configured to communicate with. This function returns the +// default list of mounts if an error occurs. 
+func (p *Predict) mounts() []string { client := p.Client() if client == nil { - return nil, nil + return nil } mounts, err := client.Sys().ListMounts() - if err != nil { - return nil, err - } - - return mounts, nil -} - -// mounts returns a sorted list of the mount paths for Vault server for -// which the client is configured to communicate with. This function returns the -// default list of mounts if an error occurs. -func (p *Predict) mounts() []string { - mounts, err := p.mountInfos() if err != nil { return defaultPredictVaultMounts } diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 644a366673d4e..12f364106f7a0 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -389,8 +389,6 @@ func TestPredict_Plugins(t *testing.T) { "postgresql-database-plugin", "rabbitmq", "radius", - "redis-database-plugin", - "redis-elasticache-database-plugin", "redshift-database-plugin", "snowflake-database-plugin", "ssh", @@ -554,80 +552,7 @@ func TestPredict_Paths(t *testing.T) { p := NewPredict() p.client = client - act := p.paths("kv", "1", tc.path, tc.includeFiles) - if !reflect.DeepEqual(act, tc.exp) { - t.Errorf("expected %q to be %q", act, tc.exp) - } - }) - } - }) -} - -func TestPredict_PathsKVv2(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerWithKVVersion(t, "2") - defer closer() - - data := map[string]interface{}{"data": map[string]interface{}{"a": "b"}} - if _, err := client.Logical().Write("secret/data/bar", data); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("secret/data/foo", data); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("secret/data/zip/zap", data); err != nil { - t.Fatal(err) - } - - cases := []struct { - name string - path string - includeFiles bool - exp []string - }{ - { - "bad_path", - "nope/not/a/real/path/ever", - true, - []string{"nope/not/a/real/path/ever"}, - }, - { - "good_path", - "secret/", - true, - []string{"secret/bar", 
"secret/foo", "secret/zip/"}, - }, - { - "good_path_no_files", - "secret/", - false, - []string{"secret/zip/"}, - }, - { - "partial_match", - "secret/z", - true, - []string{"secret/zip/"}, - }, - { - "partial_match_no_files", - "secret/z", - false, - []string{"secret/zip/"}, - }, - } - - t.Run("group", func(t *testing.T) { - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - p := NewPredict() - p.client = client - - act := p.paths("kv", "2", tc.path, tc.includeFiles) + act := p.paths(tc.path, tc.includeFiles) if !reflect.DeepEqual(act, tc.exp) { t.Errorf("expected %q to be %q", act, tc.exp) } diff --git a/command/command_test.go b/command/command_test.go index f4b8c7fd251ad..f3249f2b51a35 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -67,13 +67,6 @@ func testVaultServer(tb testing.TB) (*api.Client, func()) { return client, closer } -func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerUnsealWithKVVersion(tb, kvVersion) - return client, closer -} - func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { tb.Helper() @@ -92,10 +85,6 @@ func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { // testVaultServerUnseal creates a test vault cluster and returns a configured // API client, list of unseal keys (as strings), and a closer function. 
func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { - return testVaultServerUnsealWithKVVersion(tb, "1") -} - -func testVaultServerUnsealWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, []string, func()) { tb.Helper() logger := log.NewInterceptLogger(&log.LoggerOptions{ Output: log.DefaultOutput, @@ -103,7 +92,7 @@ func testVaultServerUnsealWithKVVersion(tb testing.TB, kvVersion string) (*api.C JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat, }) - return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{ + return testVaultServerCoreConfig(tb, &vault.CoreConfig{ DisableMlock: true, DisableCache: true, Logger: logger, @@ -111,10 +100,6 @@ func testVaultServerUnsealWithKVVersion(tb testing.TB, kvVersion string) (*api.C AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, BuiltinRegistry: builtinplugins.Registry, - }, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: 1, - KVVersion: kvVersion, }) } @@ -136,19 +121,15 @@ func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []s }) } -func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) { - return testVaultServerCoreConfigWithOpts(tb, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: 1, // Default is 3, but we don't need that many - }) -} - // testVaultServerCoreConfig creates a new vault cluster with the given core // configuration. This is a lower-level test helper. 
-func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { +func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) { tb.Helper() - cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts) + cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, // Default is 3, but we don't need that many + }) cluster.Start() // Make it easy to get access to the active diff --git a/command/commands.go b/command/commands.go index f6aad476d1f39..6b9f5c89e7435 100644 --- a/command/commands.go +++ b/command/commands.go @@ -347,11 +347,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, - "namespace patch": func() (cli.Command, error) { - return &NamespacePatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, "namespace delete": func() (cli.Command, error) { return &NamespaceDeleteCommand{ BaseCommand: getBaseCommand(), diff --git a/command/config/util.go b/command/config/util.go index 1ac47df7e0533..3dd09fdec773f 100644 --- a/command/config/util.go +++ b/command/config/util.go @@ -5,7 +5,6 @@ import ( ) // DefaultTokenHelper returns the token helper that is configured for Vault. -// This helper should only be used for non-server CLI commands. 
func DefaultTokenHelper() (token.TokenHelper, error) { config, err := LoadConfig("") if err != nil { diff --git a/command/debug.go b/command/debug.go index 71e1a97c8da7e..f0a8846939bd0 100644 --- a/command/debug.go +++ b/command/debug.go @@ -349,7 +349,7 @@ func (c *DebugCommand) generateIndex() error { dir, file := filepath.Split(relPath) if len(dir) != 0 { - dir = filepath.Clean(dir) + dir = strings.TrimSuffix(dir, "/") filesArr := outputLayout[dir].(map[string]interface{})["files"] outputLayout[dir].(map[string]interface{})["files"] = append(filesArr.([]string), file) } else { @@ -448,7 +448,7 @@ func (c *DebugCommand) preflight(rawArgs []string) (string, error) { } // Strip trailing slash before proceeding - c.flagOutput = filepath.Clean(c.flagOutput) + c.flagOutput = strings.TrimSuffix(c.flagOutput, "/") // If compression is enabled, trim the extension so that the files are // written to a directory even if compression somehow fails. We ensure the @@ -1091,15 +1091,13 @@ func (c *DebugCommand) writeLogs(ctx context.Context) { for { select { case log := <-logCh: - if len(log) > 0 { - if !strings.HasSuffix(log, "\n") { - log += "\n" - } - _, err = out.WriteString(log) - if err != nil { - c.captureError("log", err) - return - } + if !strings.HasSuffix(log, "\n") { + log += "\n" + } + _, err = out.WriteString(log) + if err != nil { + c.captureError("log", err) + return } case <-ctx.Done(): return diff --git a/command/format.go b/command/format.go index 4dffed6463592..6812ba4c85e2d 100644 --- a/command/format.go +++ b/command/format.go @@ -331,12 +331,6 @@ func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, da out = append(out, fmt.Sprintf("Cluster ID | %s", status.ClusterID)) } - // Output if HCP link is configured - if status.HCPLinkStatus != "" { - out = append(out, fmt.Sprintf("HCP Link Status | %s", status.HCPLinkStatus)) - out = append(out, fmt.Sprintf("HCP Link Resource ID | %s", status.HCPLinkResourceID)) - } - // Output if HA is 
enabled out = append(out, fmt.Sprintf("HA Enabled | %t", status.HAEnabled)) diff --git a/command/kv_get.go b/command/kv_get.go index 391efaf882cec..3aca29d209c57 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -80,7 +80,7 @@ func (c *KVGetCommand) Flags() *FlagSets { } func (c *KVGetCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() + return nil } func (c *KVGetCommand) AutocompleteFlags() complete.Flags { diff --git a/command/kv_patch.go b/command/kv_patch.go index 5368d9e0dbd03..1134248f5d7c5 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -120,7 +120,7 @@ func (c *KVPatchCommand) Flags() *FlagSets { } func (c *KVPatchCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() + return nil } func (c *KVPatchCommand) AutocompleteFlags() complete.Flags { diff --git a/command/kv_put.go b/command/kv_put.go index fe991711a56b5..f380e2d64d995 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -96,7 +96,7 @@ func (c *KVPutCommand) Flags() *FlagSets { } func (c *KVPutCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFolders() + return nil } func (c *KVPutCommand) AutocompleteFlags() complete.Flags { diff --git a/command/login.go b/command/login.go index 9beab755b8435..fc2a0f7e6a59b 100644 --- a/command/login.go +++ b/command/login.go @@ -55,8 +55,8 @@ Usage: vault login [options] [AUTH K=V...] $ vault login -method=userpass username=my-username For more information about the list of configuration parameters available for - a given auth method, use the "vault auth help TYPE" command. You can also use - "vault auth list" to see the list of enabled auth methods. + a given auth method, use the "vault auth help TYPE". You can also use "vault + auth list" to see the list of enabled auth methods. If an auth method is enabled at a non-standard path, the -method flag still refers to the canonical type, but the -path flag refers to the enabled path. 
diff --git a/command/main.go b/command/main.go index 4a5e8028214e6..ecea00744abef 100644 --- a/command/main.go +++ b/command/main.go @@ -24,17 +24,6 @@ type VaultUI struct { detailed bool } -const ( - globalFlagOutputCurlString = "output-curl-string" - globalFlagOutputPolicy = "output-policy" - globalFlagFormat = "format" - globalFlagDetailed = "detailed" -) - -var globalFlags = []string{ - globalFlagOutputCurlString, globalFlagOutputPolicy, globalFlagFormat, globalFlagDetailed, -} - // setupEnv parses args and may replace them and sets some env vars to known // values based on format options func setupEnv(args []string) (retArgs []string, format string, detailed bool, outputCurlString bool, outputPolicy bool) { @@ -58,28 +47,38 @@ func setupEnv(args []string) (retArgs []string, format string, detailed bool, ou break } - if isGlobalFlag(arg, globalFlagOutputCurlString) { + if arg == "-output-curl-string" || arg == "--output-curl-string" { outputCurlString = true continue } - if isGlobalFlag(arg, globalFlagOutputPolicy) { + if arg == "-output-policy" || arg == "--output-policy" { outputPolicy = true continue } // Parse a given flag here, which overrides the env var - if isGlobalFlagWithValue(arg, globalFlagFormat) { - format = getGlobalFlagValue(arg) + if strings.HasPrefix(arg, "--format=") { + format = strings.TrimPrefix(arg, "--format=") + } + if strings.HasPrefix(arg, "-format=") { + format = strings.TrimPrefix(arg, "-format=") } // For backwards compat, it could be specified without an equal sign - if isGlobalFlag(arg, globalFlagFormat) { + if arg == "-format" || arg == "--format" { nextArgFormat = true } // Parse a given flag here, which overrides the env var - if isGlobalFlagWithValue(arg, globalFlagDetailed) { - detailed, err = strconv.ParseBool(getGlobalFlagValue(globalFlagDetailed)) + if strings.HasPrefix(arg, "--detailed=") { + detailed, err = strconv.ParseBool(strings.TrimPrefix(arg, "--detailed=")) + if err != nil { + detailed = false + } + haveDetailed 
= true + } + if strings.HasPrefix(arg, "-detailed=") { + detailed, err = strconv.ParseBool(strings.TrimPrefix(arg, "-detailed=")) if err != nil { detailed = false } @@ -87,7 +86,7 @@ func setupEnv(args []string) (retArgs []string, format string, detailed bool, ou } // For backwards compat, it could be specified without an equal sign to enable // detailed output. - if isGlobalFlag(arg, globalFlagDetailed) { + if arg == "-detailed" || arg == "--detailed" { detailed = true haveDetailed = true } @@ -116,20 +115,6 @@ func setupEnv(args []string) (retArgs []string, format string, detailed bool, ou return args, format, detailed, outputCurlString, outputPolicy } -func isGlobalFlag(arg string, flag string) bool { - return arg == "-"+flag || arg == "--"+flag -} - -func isGlobalFlagWithValue(arg string, flag string) bool { - return strings.HasPrefix(arg, "--"+flag+"=") || strings.HasPrefix(arg, "-"+flag+"=") -} - -func getGlobalFlagValue(arg string) string { - _, value, _ := strings.Cut(arg, "=") - - return value -} - type RunOptions struct { TokenHelper token.TokenHelper Stdout io.Writer diff --git a/command/namespace.go b/command/namespace.go index 702395753da8a..89cf2e0296855 100644 --- a/command/namespace.go +++ b/command/namespace.go @@ -36,10 +36,6 @@ Usage: vault namespace [options] [args] $ vault namespace create - Patch an existing namespace: - - $ vault namespace patch - Delete an existing namespace: $ vault namespace delete diff --git a/command/namespace_create.go b/command/namespace_create.go index 7d1f52fa8c9ba..80ce589f963d8 100644 --- a/command/namespace_create.go +++ b/command/namespace_create.go @@ -15,8 +15,6 @@ var ( type NamespaceCreateCommand struct { *BaseCommand - - flagCustomMetadata map[string]string } func (c *NamespaceCreateCommand) Synopsis() string { @@ -45,18 +43,7 @@ Usage: vault namespace create [options] PATH } func (c *NamespaceCreateCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - - f 
:= set.NewFlagSet("Command Options") - f.StringMapVar(&StringMapVar{ - Name: "custom-metadata", - Target: &c.flagCustomMetadata, - Default: map[string]string{}, - Usage: "Specifies arbitrary key=value metadata meant to describe a namespace." + - "This can be specified multiple times to add multiple pieces of metadata.", - }) - - return set + return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) } func (c *NamespaceCreateCommand) AutocompleteArgs() complete.Predictor { @@ -93,11 +80,7 @@ func (c *NamespaceCreateCommand) Run(args []string) int { return 2 } - data := map[string]interface{}{ - "custom_metadata": c.flagCustomMetadata, - } - - secret, err := client.Logical().Write("sys/namespaces/"+namespacePath, data) + secret, err := client.Logical().Write("sys/namespaces/"+namespacePath, nil) if err != nil { c.UI.Error(fmt.Sprintf("Error creating namespace: %s", err)) return 2 diff --git a/command/namespace_patch.go b/command/namespace_patch.go deleted file mode 100644 index 6f5b8390f1e0d..0000000000000 --- a/command/namespace_patch.go +++ /dev/null @@ -1,137 +0,0 @@ -package command - -import ( - "context" - "fmt" - "strings" - - "github.com/posener/complete" - - "github.com/mitchellh/cli" -) - -var ( - _ cli.Command = (*NamespacePatchCommand)(nil) - _ cli.CommandAutocomplete = (*NamespacePatchCommand)(nil) -) - -type NamespacePatchCommand struct { - *BaseCommand - - flagCustomMetadata map[string]string - flagRemoveCustomMetadata []string -} - -func (c *NamespacePatchCommand) Synopsis() string { - return "Patch an existing namespace" -} - -func (c *NamespacePatchCommand) Help() string { - helpText := ` -Usage: vault namespace patch [options] PATH - - Patch an existing namespace. The namespace patched will be relative to the - namespace provided in either the VAULT_NAMESPACE environment variable or - -namespace CLI flag. - - Patch an existing child namespace by adding and removing custom-metadata (e.g. 
ns1/): - - $ vault namespace patch ns1 -custom-metadata=foo=abc -remove-custom-metadata=bar - - Patch an existing child namespace from a parent namespace (e.g. ns1/ns2/): - - $ vault namespace patch -namespace=ns1 ns2 -custom-metadata=foo=abc - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *NamespacePatchCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - - f := set.NewFlagSet("Command Options") - f.StringMapVar(&StringMapVar{ - Name: "custom-metadata", - Target: &c.flagCustomMetadata, - Default: map[string]string{}, - Usage: "Specifies arbitrary key=value metadata meant to describe a namespace." + - "This can be specified multiple times to add multiple pieces of metadata.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "remove-custom-metadata", - Target: &c.flagRemoveCustomMetadata, - Default: []string{}, - Usage: "Key to remove from custom metadata. To specify multiple values, specify this flag multiple times.", - }) - - return set -} - -func (c *NamespacePatchCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *NamespacePatchCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *NamespacePatchCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - namespacePath := strings.TrimSpace(args[0]) - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - data := make(map[string]interface{}) - customMetadata := make(map[string]interface{}) - - for key, value := range c.flagCustomMetadata { - customMetadata[key] = value - } - - 
for _, key := range c.flagRemoveCustomMetadata { - // A null in a JSON merge patch payload will remove the associated key - customMetadata[key] = nil - } - - data["custom_metadata"] = customMetadata - - secret, err := client.Logical().JSONMergePatch(context.Background(), "sys/namespaces/"+namespacePath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error patching namespace: %s", err)) - return 2 - } - - if secret == nil || secret.Data == nil { - c.UI.Error(fmt.Sprintf("No namespace found: %s", err)) - return 2 - } - - // Handle single field output - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) - } - - return OutputSecret(c.UI, secret) -} diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index fc9ed178f5386..40136d174e86d 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -13,7 +13,7 @@ import ( "golang.org/x/term" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/docker/docker/pkg/ioutils" "github.com/hashicorp/consul/api" @@ -33,7 +33,6 @@ import ( srconsul "github.com/hashicorp/vault/serviceregistration/consul" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/diagnose" - "github.com/hashicorp/vault/vault/hcp_link" "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -456,7 +455,7 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error } // Ensure that the seal finalizer is called, even if using verify-only defer func(seal *vault.Seal) { - sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) + sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType()) finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") err = (*seal).Finalize(finalizeSealContext) if err != nil { @@ -676,7 +675,7 @@ SEALFAIL: if barrierSeal == nil { return fmt.Errorf("Diagnose could not create a barrier seal object.") } - if 
barrierSeal.BarrierType() == wrapping.WrapperTypeShamir { + if barrierSeal.BarrierType() == wrapping.Shamir { diagnose.Skipped(ctx, "Skipping barrier encryption test. Only supported for auto-unseal.") return nil } @@ -712,49 +711,6 @@ SEALFAIL: } return nil }) - - // Checking HCP link to make sure Vault could connect to SCADA. - // If it could not connect to SCADA in 5 seconds, diagnose reports an issue - if !constants.IsEnterprise { - diagnose.Skipped(ctx, "HCP link check will not run on OSS Vault.") - } else { - if config.HCPLinkConf != nil { - // we need to override API and Passthrough capabilities - // as they could not be initialized when Vault http handler - // is not fully initialized - config.HCPLinkConf.EnablePassThroughCapability = false - config.HCPLinkConf.EnableAPICapability = false - - diagnose.Test(ctx, "Check HCP Connection", func(ctx context.Context) error { - hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, vaultCore, server.logger) - if err != nil || hcpLink == nil { - return fmt.Errorf("failed to start HCP link, %w", err) - } - - // check if a SCADA session is established successfully - deadline := time.Now().Add(5 * time.Second) - linkSessionStatus := "disconnected" - for time.Now().Before(deadline) { - linkSessionStatus = hcpLink.GetConnectionStatusMessage(hcpLink.GetScadaSessionStatus()) - if linkSessionStatus == "connected" { - break - } - time.Sleep(500 * time.Millisecond) - } - if linkSessionStatus != "connected" { - return fmt.Errorf("failed to connect to HCP in 5 seconds. 
HCP session status is: %s", linkSessionStatus) - } - - err = hcpLink.Shutdown() - if err != nil { - return fmt.Errorf("failed to shutdown HCP link: %w", err) - } - - return nil - }) - } - } - return nil } diff --git a/command/operator_init.go b/command/operator_init.go index 6d67dcd9b6a45..a8b8e56010245 100644 --- a/command/operator_init.go +++ b/command/operator_init.go @@ -40,10 +40,8 @@ type OperatorInitCommand struct { } const ( - defKeyShares = 5 - defKeyThreshold = 3 - defRecoveryShares = 5 - defRecoveryThreshold = 3 + defKeyShares = 5 + defKeyThreshold = 3 ) func (c *OperatorInitCommand) Synopsis() string { @@ -105,6 +103,7 @@ func (c *OperatorInitCommand) Flags() *FlagSets { Name: "key-shares", Aliases: []string{"n"}, Target: &c.flagKeyShares, + Default: defKeyShares, Completion: complete.PredictAnything, Usage: "Number of key shares to split the generated root key into. " + "This is the number of \"unseal keys\" to generate.", @@ -114,6 +113,7 @@ func (c *OperatorInitCommand) Flags() *FlagSets { Name: "key-threshold", Aliases: []string{"t"}, Target: &c.flagKeyThreshold, + Default: defKeyThreshold, Completion: complete.PredictAnything, Usage: "Number of key shares required to reconstruct the root key. " + "This must be less than or equal to -key-shares.", @@ -182,6 +182,7 @@ func (c *OperatorInitCommand) Flags() *FlagSets { f.IntVar(&IntVar{ Name: "recovery-shares", Target: &c.flagRecoveryShares, + Default: 5, Completion: complete.PredictAnything, Usage: "Number of key shares to split the recovery key into. " + "This is only used in auto-unseal mode.", @@ -190,6 +191,7 @@ func (c *OperatorInitCommand) Flags() *FlagSets { f.IntVar(&IntVar{ Name: "recovery-threshold", Target: &c.flagRecoveryThreshold, + Default: 3, Completion: complete.PredictAnything, Usage: "Number of key shares required to reconstruct the recovery key. 
" + "This is only used in Auto Unseal mode.", @@ -231,35 +233,6 @@ func (c *OperatorInitCommand) Run(args []string) int { if c.flagStoredShares != -1 { c.UI.Warn("-stored-shares has no effect and will be removed in Vault 1.3.\n") } - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // Set defaults based on use of auto unseal seal - sealInfo, err := client.Sys().SealStatus() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - switch sealInfo.RecoverySeal { - case true: - if c.flagRecoveryShares == 0 { - c.flagRecoveryShares = defRecoveryShares - } - if c.flagRecoveryThreshold == 0 { - c.flagRecoveryThreshold = defRecoveryThreshold - } - default: - if c.flagKeyShares == 0 { - c.flagKeyShares = defKeyShares - } - if c.flagKeyThreshold == 0 { - c.flagKeyThreshold = defKeyThreshold - } - } // Build the initial init request initReq := &api.InitRequest{ @@ -273,6 +246,12 @@ func (c *OperatorInitCommand) Run(args []string) int { RecoveryPGPKeys: c.flagRecoveryPGPKeys, } + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + // Check auto mode switch { case c.flagStatus: @@ -492,6 +471,14 @@ func (c *OperatorInitCommand) init(client *api.Client, req *api.InitRequest) int req.RecoveryThreshold))) } + if len(resp.RecoveryKeys) > 0 && (req.SecretShares != defKeyShares || req.SecretThreshold != defKeyThreshold) { + c.UI.Output("") + c.UI.Warn(wrapAtLength( + "WARNING! -key-shares and -key-threshold is ignored when " + + "Auto Unseal is used. 
Use -recovery-shares and -recovery-threshold instead.", + )) + } + return 0 } diff --git a/command/operator_init_test.go b/command/operator_init_test.go index ec02873587dfc..491d623a14732 100644 --- a/command/operator_init_test.go +++ b/command/operator_init_test.go @@ -355,7 +355,7 @@ func TestOperatorInitCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Error making API request" + expected := "Error initializing: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) diff --git a/command/operator_migrate.go b/command/operator_migrate.go index a974f58d6cb61..5dcec8a56ed6d 100644 --- a/command/operator_migrate.go +++ b/command/operator_migrate.go @@ -332,11 +332,11 @@ func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfi for _, stanza := range []string{"storage_source", "storage_destination"} { o := list.Filter(stanza) if len(o.Items) != 1 { - return nil, fmt.Errorf("exactly one %q block is required", stanza) + return nil, fmt.Errorf("exactly one '%s' block is required", stanza) } if err := parseStorage(&result, o, stanza); err != nil { - return nil, fmt.Errorf("error parsing %q: %w", stanza, err) + return nil, fmt.Errorf("error parsing '%s': %w", stanza, err) } } return &result, nil diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index 466ab84142b41..37bc77eedbafa 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -169,7 +169,7 @@ func (c *OperatorRaftJoinCommand) Run(args []string) int { } if c.flagAutoJoinScheme != "" && (c.flagAutoJoinScheme != "http" && c.flagAutoJoinScheme != "https") { - c.UI.Error(fmt.Sprintf("invalid scheme %q; must either be http or https", c.flagAutoJoinScheme)) + c.UI.Error(fmt.Sprintf("invalid scheme '%s'; must either be http or https", c.flagAutoJoinScheme)) return 1 } diff --git a/command/pgp_test.go 
b/command/pgp_test.go index b9f3ee2a91acd..8d0b5d6a0474a 100644 --- a/command/pgp_test.go +++ b/command/pgp_test.go @@ -13,8 +13,8 @@ import ( "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/vault" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func getPubKeyFiles(t *testing.T) (string, []string, error) { diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go index a65bf6702a96d..7f0c4a614b8b4 100644 --- a/command/plugin_deregister.go +++ b/command/plugin_deregister.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/cli" @@ -18,8 +17,6 @@ var ( type PluginDeregisterCommand struct { *BaseCommand - - flagPluginVersion string } func (c *PluginDeregisterCommand) Synopsis() string { @@ -31,36 +28,20 @@ func (c *PluginDeregisterCommand) Help() string { Usage: vault plugin deregister [options] TYPE NAME Deregister an existing plugin in the catalog. If the plugin does not exist, - no action is taken (the command is idempotent). The TYPE argument + no action is taken (the command is idempotent). The argument of type takes "auth", "database", or "secret". 
- Deregister the unversioned auth plugin named my-custom-plugin: + Deregister the plugin named my-custom-plugin: $ vault plugin deregister auth my-custom-plugin - Deregister the auth plugin named my-custom-plugin, version 1.0.0: - - $ vault plugin deregister -version=v1.0.0 auth my-custom-plugin - ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *PluginDeregisterCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "version", - Target: &c.flagPluginVersion, - Completion: complete.PredictAnything, - Usage: "Semantic version of the plugin to deregister. If unset, " + - "only an unversioned plugin may be deregistered.", - }) - - return set + return c.flagSet(FlagSetHTTP) } func (c *PluginDeregisterCommand) AutocompleteArgs() complete.Predictor { @@ -81,19 +62,21 @@ func (c *PluginDeregisterCommand) Run(args []string) int { var pluginNameRaw, pluginTypeRaw string args = f.Args() - switch len(args) { - case 0: - c.UI.Error("Not enough arguments (expected 1, or 2, got 0)") + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or 2, got %d)", len(args))) return 1 - case 1: + + // These cases should come after invalid cases have been checked + case len(args) == 1: pluginTypeRaw = "unknown" pluginNameRaw = args[0] - case 2: + case len(args) == 2: pluginTypeRaw = args[0] pluginNameRaw = args[1] - default: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, or 2, got %d)", len(args))) - return 1 } client, err := c.Client() @@ -108,18 +91,10 @@ func (c *PluginDeregisterCommand) Run(args []string) int { return 2 } pluginName := strings.TrimSpace(pluginNameRaw) - if c.flagPluginVersion != "" { - _, err := semver.NewSemver(c.flagPluginVersion) - if err != nil { - c.UI.Error(fmt.Sprintf("version %q is not a valid 
semantic version: %v", c.flagPluginVersion, err)) - return 2 - } - } if err := client.Sys().DeregisterPlugin(&api.DeregisterPluginInput{ - Name: pluginName, - Type: pluginType, - Version: c.flagPluginVersion, + Name: pluginName, + Type: pluginType, }); err != nil { c.UI.Error(fmt.Sprintf("Error deregistering plugin named %s: %s", pluginName, err)) return 2 diff --git a/command/plugin_deregister_test.go b/command/plugin_deregister_test.go index 7a6bc12d41bc8..9696c2f33c66d 100644 --- a/command/plugin_deregister_test.go +++ b/command/plugin_deregister_test.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -77,7 +76,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) + pluginDir, cleanup := testPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -132,101 +131,6 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } }) - t.Run("integration with version", func(t *testing.T) { - t.Parallel() - - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) - - client, _, closer := testVaultServerPluginDir(t, pluginDir) - defer closer() - - pluginName := "my-plugin" - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) - - ui, cmd := testPluginDeregisterCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "-version=" + version, - consts.PluginTypeCredential.String(), - pluginName, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Success! 
Deregistered plugin (if it was registered): " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeUnknown, - }) - if err != nil { - t.Fatal(err) - } - - found := false - for _, p := range resp.Details { - if p.Name == pluginName { - found = true - } - } - if found { - t.Errorf("expected %q to not be in %#v", pluginName, resp.Details) - } - }) - - t.Run("integration with missing version", func(t *testing.T) { - t.Parallel() - - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) - - client, _, closer := testVaultServerPluginDir(t, pluginDir) - defer closer() - - pluginName := "my-plugin" - testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) - - ui, cmd := testPluginDeregisterCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - consts.PluginTypeCredential.String(), - pluginName, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Success! 
Deregistered plugin (if it was registered): " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeUnknown, - }) - if err != nil { - t.Fatal(err) - } - - found := false - for _, p := range resp.Details { - if p.Name == pluginName { - found = true - } - } - if !found { - t.Errorf("expected %q to be in %#v", pluginName, resp.Details) - } - }) - t.Run("communication_failure", func(t *testing.T) { t.Parallel() diff --git a/command/plugin_info.go b/command/plugin_info.go index bb7a4a5053cbe..a5676e7304450 100644 --- a/command/plugin_info.go +++ b/command/plugin_info.go @@ -107,12 +107,11 @@ func (c *PluginInfoCommand) Run(args []string) int { } data := map[string]interface{}{ - "args": resp.Args, - "builtin": resp.Builtin, - "command": resp.Command, - "name": resp.Name, - "sha256": resp.SHA256, - "deprecation_status": resp.DeprecationStatus, + "args": resp.Args, + "builtin": resp.Builtin, + "command": resp.Command, + "name": resp.Name, + "sha256": resp.SHA256, } if c.flagField != "" { diff --git a/command/plugin_info_test.go b/command/plugin_info_test.go index cfdab72ab3712..46dac68138c71 100644 --- a/command/plugin_info_test.go +++ b/command/plugin_info_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -74,7 +73,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("default", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) + pluginDir, cleanup := testPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) @@ -105,7 +104,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("field", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) + pluginDir, 
cleanup := testPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) diff --git a/command/plugin_list.go b/command/plugin_list.go index d9651127ba195..40cf5a8fd86fe 100644 --- a/command/plugin_list.go +++ b/command/plugin_list.go @@ -18,8 +18,6 @@ var ( type PluginListCommand struct { *BaseCommand - - flagDetailed bool } func (c *PluginListCommand) Synopsis() string { @@ -42,30 +40,13 @@ Usage: vault plugin list [options] [TYPE] $ vault plugin list database - List all available plugins with detailed output: - - $ vault plugin list -detailed - ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *PluginListCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "detailed", - Target: &c.flagDetailed, - Default: false, - Usage: "Print detailed plugin information such as plugin type, " + - "version, and deprecation status for each plugin. This option " + - "is only applicable to table-formatted output.", - }) - - return set + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) } func (c *PluginListCommand) AutocompleteArgs() complete.Predictor { @@ -124,11 +105,19 @@ func (c *PluginListCommand) Run(args []string) int { switch Format(c.UI) { case "table": - if c.flagDetailed { - c.UI.Output(tableOutput(c.detailedResponse(resp), nil)) - return 0 + var flattenedNames []string + namesAdded := make(map[string]bool) + for _, names := range resp.PluginsByType { + for _, name := range names { + if ok := namesAdded[name]; !ok { + flattenedNames = append(flattenedNames, name) + namesAdded[name] = true + } + } + sort.Strings(flattenedNames) } - c.UI.Output(tableOutput(c.simpleResponse(resp), nil)) + list := append([]string{"Plugins"}, flattenedNames...) 
+ c.UI.Output(tableOutput(list, nil)) return 0 default: res := make(map[string]interface{}) @@ -138,28 +127,3 @@ func (c *PluginListCommand) Run(args []string) int { return OutputData(c.UI, res) } } - -func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse) []string { - var flattenedNames []string - namesAdded := make(map[string]bool) - for _, names := range plugins.PluginsByType { - for _, name := range names { - if ok := namesAdded[name]; !ok { - flattenedNames = append(flattenedNames, name) - namesAdded[name] = true - } - } - sort.Strings(flattenedNames) - } - list := append([]string{"Plugins"}, flattenedNames...) - return list -} - -func (c *PluginListCommand) detailedResponse(plugins *api.ListPluginsResponse) []string { - out := []string{"Name | Type | Version | Deprecation Status"} - for _, plugin := range plugins.Details { - out = append(out, fmt.Sprintf("%s | %s | %s | %s", plugin.Name, plugin.Type, plugin.Version, plugin.DeprecationStatus)) - } - - return out -} diff --git a/command/plugin_register.go b/command/plugin_register.go index 0c4510e3b99b2..4a1eb19a5baba 100644 --- a/command/plugin_register.go +++ b/command/plugin_register.go @@ -21,7 +21,6 @@ type PluginRegisterCommand struct { flagArgs []string flagCommand string flagSHA256 string - flagVersion string } func (c *PluginRegisterCommand) Synopsis() string { @@ -38,13 +37,12 @@ Usage: vault plugin register [options] TYPE NAME Register the plugin named my-custom-plugin: - $ vault plugin register -sha256=d3f0a8b... -version=v1.0.0 auth my-custom-plugin + $ vault plugin register -sha256=d3f0a8b... auth my-custom-plugin Register a plugin with custom arguments: $ vault plugin register \ -sha256=d3f0a8b... \ - -version=v1.0.0 \ -args=--with-glibc,--with-cgo \ auth my-custom-plugin @@ -81,13 +79,6 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { Usage: "SHA256 of the plugin binary. 
This is required for all plugins.", }) - f.StringVar(&StringVar{ - Name: "version", - Target: &c.flagVersion, - Completion: complete.PredictAnything, - Usage: "Semantic version of the plugin. Optional.", - }) - return set } @@ -153,7 +144,6 @@ func (c *PluginRegisterCommand) Run(args []string) int { Args: c.flagArgs, Command: command, SHA256: c.flagSHA256, - Version: c.flagVersion, }); err != nil { c.UI.Error(fmt.Sprintf("Error registering plugin %s: %s", pluginName, err)) return 2 diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go index 69031c46911f6..05b358e6f4782 100644 --- a/command/plugin_register_test.go +++ b/command/plugin_register_test.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -78,7 +77,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) + pluginDir, cleanup := testPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go index 6c4982295b821..99b0c03c7f6e0 100644 --- a/command/plugin_reload_test.go +++ b/command/plugin_reload_test.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" "github.com/mitchellh/cli" ) @@ -83,7 +82,7 @@ func TestPluginReloadCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) + pluginDir, cleanup := testPluginDir(t) defer cleanup(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) diff --git a/command/plugin_test.go b/command/plugin_test.go index be40abef8e142..786abdb52f4e9 100644 --- a/command/plugin_test.go +++ b/command/plugin_test.go @@ -6,12 +6,36 @@ import ( "io" 
"io/ioutil" "os" + "path/filepath" "testing" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/consts" ) +// testPluginDir creates a temporary directory suitable for holding plugins. +// This helper also resolves symlinks to make tests happy on OS X. +func testPluginDir(tb testing.TB) (string, func(tb testing.TB)) { + tb.Helper() + + dir, err := ioutil.TempDir("", "") + if err != nil { + tb.Fatal(err) + } + + // OSX tempdir are /var, but actually symlinked to /private/var + dir, err = filepath.EvalSymlinks(dir) + if err != nil { + tb.Fatal(err) + } + + return dir, func(tb testing.TB) { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + } +} + // testPluginCreate creates a sample plugin in a tempdir and returns the shasum // and filepath to the plugin. func testPluginCreate(tb testing.TB, dir, name string) (string, string) { @@ -54,22 +78,3 @@ func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name st return pth, sha256Sum } - -// testPluginCreateAndRegisterVersioned creates a versioned plugin and registers it in the catalog. 
-func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType consts.PluginType) (string, string, string) { - tb.Helper() - - pth, sha256Sum := testPluginCreate(tb, dir, name) - - if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ - Name: name, - Type: pluginType, - Command: name, - SHA256: sha256Sum, - Version: "v1.0.0", - }); err != nil { - tb.Fatal(err) - } - - return pth, sha256Sum, "v1.0.0" -} diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go index 70393412059ac..814f4731204f3 100644 --- a/command/secrets_enable_test.go +++ b/command/secrets_enable_test.go @@ -2,7 +2,6 @@ package command import ( "io/ioutil" - "os" "strings" "testing" @@ -12,10 +11,9 @@ import ( "github.com/mitchellh/cli" ) -// logicalBackendAdjustmentFactor is set to plus 1 for the database backend -// which is a plugin but not found in go.mod files, and minus 1 for the ldap -// and openldap secret backends which have the same underlying plugin. 
-var logicalBackendAdjustmentFactor = 1 - 1 +// logicalBackendAdjustmentFactor is set to 1 for the database backend +// which is a plugin but not found in go.mod files +var logicalBackendAdjustmentFactor = 1 func testSecretsEnableCommand(tb testing.TB) (*cli.MockUi, *SecretsEnableCommand) { tb.Helper() @@ -244,23 +242,14 @@ func TestSecretsEnableCommand_Run(t *testing.T) { } for _, b := range backends { - expectedResult := 0 - status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeSecrets) - allowDeprecated := os.Getenv(consts.VaultAllowPendingRemovalMountsEnv) - - // Need to handle deprecated builtins specially - if (status == consts.PendingRemoval && allowDeprecated == "") || status == consts.Removed { - expectedResult = 2 - } - ui, cmd := testSecretsEnableCommand(t) cmd.client = client - actualResult := cmd.Run([]string{ + code := cmd.Run([]string{ b, }) - if actualResult != expectedResult { - t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) + if exp := 0; code != exp { + t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) } } }) diff --git a/command/secrets_list.go b/command/secrets_list.go index e67a1e8215754..e9ce1ff31e236 100644 --- a/command/secrets_list.go +++ b/command/secrets_list.go @@ -145,7 +145,7 @@ func (c *SecretsListCommand) detailedMounts(mounts map[string]*api.MountOutput) } } - out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Force No Cache | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Deprecation Status"} + out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Force No Cache | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID "} for _, path := range paths { mount := mounts[path] @@ -162,7 +162,7 @@ func (c *SecretsListCommand) detailedMounts(mounts map[string]*api.MountOutput) 
pluginName = mount.Config.PluginName } - out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %t | %s | %t | %v | %s | %s | %s | %s", + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %t | %s | %t | %v | %s | %s | %s", path, pluginName, mount.Accessor, @@ -175,7 +175,6 @@ func (c *SecretsListCommand) detailedMounts(mounts map[string]*api.MountOutput) mount.Options, mount.Description, mount.UUID, - mount.DeprecationStatus, )) } diff --git a/command/secrets_list_test.go b/command/secrets_list_test.go index 1aeee5bf67294..9edb628202d7d 100644 --- a/command/secrets_list_test.go +++ b/command/secrets_list_test.go @@ -42,7 +42,7 @@ func TestSecretsListCommand_Run(t *testing.T) { { "detailed", []string{"-detailed"}, - "Deprecation Status", + "Default TTL", 0, }, } diff --git a/command/server.go b/command/server.go index a75ea0b5b0764..d849b8d101a5e 100644 --- a/command/server.go +++ b/command/server.go @@ -24,8 +24,8 @@ import ( systemd "github.com/coreos/go-systemd/daemon" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/mlock" @@ -42,14 +42,12 @@ import ( "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/version" sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/hcp_link" vaultseal 
"github.com/hashicorp/vault/vault/seal" "github.com/mitchellh/cli" "github.com/mitchellh/go-testing-interface" @@ -77,9 +75,8 @@ const ( // Even though there are more types than the ones below, the following consts // are declared internally for value comparison and reusability. - storageTypeRaft = "raft" - storageTypeConsul = "consul" - disableStorageTypeCheckEnv = "VAULT_DISABLE_SUPPORTED_STORAGE_CHECK" + storageTypeRaft = "raft" + storageTypeConsul = "consul" ) type ServerCommand struct { @@ -118,8 +115,6 @@ type ServerCommand struct { flagLogFormat string flagRecovery bool flagDev bool - flagDevTLS bool - flagDevTLSCertDir string flagDevRootTokenID string flagDevListenAddr string flagDevNoStoreToken bool @@ -248,23 +243,6 @@ func (c *ServerCommand) Flags() *FlagSets { "production.", }) - f.BoolVar(&BoolVar{ - Name: "dev-tls", - Target: &c.flagDevTLS, - Usage: "Enable TLS development mode. In this mode, Vault runs in-memory and " + - "starts unsealed, with a generated TLS CA, certificate and key. " + - "As the name implies, do not run \"dev-tls\" mode in " + - "production.", - }) - - f.StringVar(&StringVar{ - Name: "dev-tls-cert-dir", - Target: &c.flagDevTLSCertDir, - Default: "", - Usage: "Directory where generated TLS files are created if `-dev-tls` is " + - "specified. 
If left unset, files are generated in a temporary directory.", - }) - f.StringVar(&StringVar{ Name: "dev-root-token-id", Target: &c.flagDevRootTokenID, @@ -557,7 +535,7 @@ func (c *ServerCommand) runRecoveryMode() int { var wrapper wrapping.Wrapper if len(config.Seals) == 0 { - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir}) } if len(config.Seals) > 1 { @@ -566,7 +544,7 @@ func (c *ServerCommand) runRecoveryMode() int { } configSeal := config.Seals[0] - sealType := wrapping.WrapperTypeShamir.String() + sealType := wrapping.Shamir if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { sealType = os.Getenv("VAULT_SEAL_TYPE") configSeal.Type = sealType @@ -579,7 +557,9 @@ func (c *ServerCommand) runRecoveryMode() int { var seal vault.Seal defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), + Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), }) sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger) @@ -593,12 +573,9 @@ func (c *ServerCommand) runRecoveryMode() int { if wrapper == nil { seal = defaultSeal } else { - seal, err = vault.NewAutoSeal(&vaultseal.Access{ + seal = vault.NewAutoSeal(&vaultseal.Access{ Wrapper: wrapper, }) - if err != nil { - c.UI.Error(fmt.Sprintf("error creating auto seal: %v", err)) - } } barrierSeal = seal @@ -1047,7 +1024,7 @@ func (c *ServerCommand) Run(args []string) int { } // Automatically enable dev mode if other dev flags are provided. 
- if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevTLS { + if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 { c.flagDev = true } @@ -1083,7 +1060,6 @@ func (c *ServerCommand) Run(args []string) int { // Load the configuration var config *server.Config var err error - var certDir string if c.flagDev { var devStorageType string switch { @@ -1098,59 +1074,11 @@ func (c *ServerCommand) Run(args []string) int { default: devStorageType = "inmem" } - - if c.flagDevTLS { - if c.flagDevTLSCertDir != "" { - _, err := os.Stat(c.flagDevTLSCertDir) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - certDir = c.flagDevTLSCertDir - } else { - certDir, err = os.MkdirTemp("", "vault-tls") - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - } - config, err = server.DevTLSConfig(devStorageType, certDir) - - defer func() { - err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - // Only delete temp directories we made. 
- if c.flagDevTLSCertDir == "" { - err = os.Remove(certDir) - if err != nil { - c.UI.Error(err.Error()) - } - } - }() - - } else { - config, err = server.DevConfig(devStorageType) - } - + config, err = server.DevConfig(devStorageType) if err != nil { c.UI.Error(err.Error()) return 1 } - if c.flagDevListenAddr != "" { config.Listeners[0].Address = c.flagDevListenAddr } @@ -1400,24 +1328,6 @@ func (c *ServerCommand) Run(args []string) int { // Apply any enterprise configuration onto the coreConfig. adjustCoreConfigForEnt(config, &coreConfig) - if !c.flagDev && os.Getenv(disableStorageTypeCheckEnv) == "" { - inMemStorageTypes := []string{ - "inmem", "inmem_ha", "inmem_transactional", "inmem_transactional_ha", - } - - if strutil.StrListContains(inMemStorageTypes, coreConfig.StorageType) { - c.UI.Warn("") - c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which should NOT be used in production", coreConfig.StorageType))) - c.UI.Warn("") - } else { - err = checkStorageTypeForEnt(&coreConfig) - if err != nil { - c.UI.Error(fmt.Sprintf("Invalid storage type: %s", err)) - return 1 - } - } - } - // Initialize the core core, newCoreError := vault.NewCore(&coreConfig) if newCoreError != nil { @@ -1565,7 +1475,7 @@ func (c *ServerCommand) Run(args []string) int { } // If we're in Dev mode, then initialize the core - err = initDevCore(c, &coreConfig, config, core, certDir) + err = initDevCore(c, &coreConfig, config, core) if err != nil { c.UI.Error(err.Error()) return 1 @@ -1578,14 +1488,6 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - hcpLogger := c.logger.Named("hcpLink") - hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, core, hcpLogger) - if err != nil { - c.logger.Error("failed to start HCP Link", "error", err) - } else if hcpLink != nil { - c.logger.Trace("started HCP link") - } - if c.flagTestServerConfig { return 0 } @@ -1697,12 +1599,6 @@ func (c *ServerCommand) Run(args []string) int { // Setting log request with the 
new value in the config after reload core.ReloadLogRequestsLevel() - // reloading HCP link - hcpLink, err = c.reloadHCPLink(hcpLink, config, core, hcpLogger) - if err != nil { - c.logger.Error(err.Error()) - } - if config.LogLevel != "" { configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel)) switch configLogLevel { @@ -1756,12 +1652,6 @@ func (c *ServerCommand) Run(args []string) int { // Stop the listeners so that we don't process further client requests. c.cleanupGuard.Do(listenerCloseFunc) - if hcpLink != nil { - if err := hcpLink.Shutdown(); err != nil { - c.UI.Error(fmt.Sprintf("Error with HCP Link shutdown: %v", err.Error())) - } - } - // Finalize will wait until after Vault is sealed, which means the // request forwarding listeners will also be closed (and also // waited for). @@ -1774,31 +1664,6 @@ func (c *ServerCommand) Run(args []string) int { return retCode } -func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.WrappedHCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.WrappedHCPLinkVault, error) { - // trigger a shutdown - if hcpLinkVault != nil { - err := hcpLinkVault.Shutdown() - if err != nil { - return nil, err - } - } - - if conf.HCPLinkConf == nil { - // if cloud stanza is not configured, we should not show anything - // in the seal-status related to HCP link - core.SetHCPLinkStatus("", "") - return nil, nil - } - - // starting HCP link - hcpLink, err := hcp_link.NewHCPLink(conf.HCPLinkConf, core, hcpLogger) - if err != nil { - return nil, fmt.Errorf("failed to restart HCP Link and it is no longer running, %w", err) - } - - return hcpLink, nil -} - func (c *ServerCommand) notifySystemd(status string) { sent, err := systemd.SdNotify(false, status) if err != nil { @@ -2200,8 +2065,7 @@ func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error { // detectRedirect is used to attempt redirect address detection func (c *ServerCommand) detectRedirect(detect 
physical.RedirectDetect, - config *server.Config, -) (string, error) { + config *server.Config) (string, error) { // Get the hostname host, err := detect.DetectHostAddr() if err != nil { @@ -2397,28 +2261,24 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma var wrapper wrapping.Wrapper var barrierWrapper wrapping.Wrapper if c.flagDevAutoSeal { - var err error - barrierSeal, err = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) - if err != nil { - return nil, nil, nil, nil, nil, err - } + barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) return barrierSeal, nil, nil, nil, nil, nil } // Handle the case where no seal is provided switch len(config.Seals) { case 0: - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir}) case 1: // If there's only one seal and it's disabled assume they want to // migrate to a shamir seal and simply didn't provide it if config.Seals[0].Disabled { - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir}) } } var createdSeals []vault.Seal = make([]vault.Seal, len(config.Seals)) for _, configSeal := range config.Seals { - sealType := wrapping.WrapperTypeShamir.String() + sealType := wrapping.Shamir if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { sealType = os.Getenv("VAULT_SEAL_TYPE") configSeal.Type = sealType @@ -2430,7 +2290,9 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) c.allLoggers = append(c.allLoggers, sealLogger) defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), + Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), }) var sealInfoKeys 
[]string sealInfoMap := map[string]string{} @@ -2444,13 +2306,9 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma if wrapper == nil { seal = defaultSeal } else { - var err error - seal, err = vault.NewAutoSeal(&vaultseal.Access{ + seal = vault.NewAutoSeal(&vaultseal.Access{ Wrapper: wrapper, }) - if err != nil { - return nil, nil, nil, nil, nil, err - } } infoPrefix := "" if configSeal.Disabled { @@ -2563,11 +2421,7 @@ func determineRedirectAddr(c *ServerCommand, coreConfig *vault.CoreConfig, confi } } if coreConfig.RedirectAddr == "" && c.flagDev { - protocol := "http" - if c.flagDevTLS { - protocol = "https" - } - coreConfig.RedirectAddr = fmt.Sprintf("%s://%s", protocol, config.Listeners[0].Address) + coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Address) } return retErr } @@ -2656,8 +2510,7 @@ func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) { } func createCoreConfig(c *ServerCommand, config *server.Config, backend physical.Backend, configSR sr.ServiceRegistration, barrierSeal, unwrapSeal vault.Seal, - metricsHelper *metricsutil.MetricsHelper, metricSink *metricsutil.ClusterMetricSink, secureRandomReader io.Reader, -) vault.CoreConfig { + metricsHelper *metricsutil.MetricsHelper, metricSink *metricsutil.ClusterMetricSink, secureRandomReader io.Reader) vault.CoreConfig { coreConfig := &vault.CoreConfig{ RawConfig: config, Physical: backend, @@ -2729,7 +2582,7 @@ func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server return nil } -func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string) error { +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core) error { if c.flagDev && !c.flagDevSkipInit { init, err := c.enableDev(core, coreConfig) @@ -2780,15 +2633,10 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. 
"token is already authenticated to the CLI, so you can immediately " + "begin using Vault.")) c.UI.Warn("") - c.UI.Warn("You may need to set the following environment variables:") + c.UI.Warn("You may need to set the following environment variable:") c.UI.Warn("") - protocol := "http://" - if c.flagDevTLS { - protocol = "https://" - } - - endpointURL := protocol + config.Listeners[0].Address + endpointURL := "http://" + config.Listeners[0].Address if runtime.GOOS == "windows" { c.UI.Warn("PowerShell:") c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL)) @@ -2798,18 +2646,6 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL)) } - if c.flagDevTLS { - if runtime.GOOS == "windows" { - c.UI.Warn("PowerShell:") - c.UI.Warn(fmt.Sprintf(" $env:VAULT_CACERT=\"%s/vault-ca.pem\"", certDir)) - c.UI.Warn("cmd.exe:") - c.UI.Warn(fmt.Sprintf(" set VAULT_CACERT=%s/vault-ca.pem", certDir)) - } else { - c.UI.Warn(fmt.Sprintf(" $ export VAULT_CACERT='%s/vault-ca.pem'", certDir)) - } - c.UI.Warn("") - } - // Unseal key is not returned if stored shares is supported if len(init.SecretShares) > 0 { c.UI.Warn("") diff --git a/command/server/config.go b/command/server/config.go index 5d985db69d08d..a3526e16e8f23 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -22,12 +22,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/consts" ) -const ( - VaultDevCAFilename = "vault-ca.pem" - VaultDevCertFilename = "vault-cert.pem" - VaultDevKeyFilename = "vault-key.pem" -) - var entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { return nil } @@ -157,62 +151,6 @@ ui = true return parsed, nil } -// DevTLSConfig is a Config that is used for dev tls mode of Vault. 
-func DevTLSConfig(storageType, certDir string) (*Config, error) { - ca, err := GenerateCA() - if err != nil { - return nil, err - } - - cert, key, err := GenerateCert(ca.Template, ca.Signer) - if err != nil { - return nil, err - } - - if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevCAFilename), []byte(ca.PEM), 0o444); err != nil { - return nil, err - } - - if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevCertFilename), []byte(cert), 0o400); err != nil { - return nil, err - } - - if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevKeyFilename), []byte(key), 0o400); err != nil { - return nil, err - } - - hclStr := ` -disable_mlock = true - -listener "tcp" { - address = "[::]:8200" - tls_cert_file = "%s/vault-cert.pem" - tls_key_file = "%s/vault-key.pem" - proxy_protocol_behavior = "allow_authorized" - proxy_protocol_authorized_addrs = "[::]:8200" -} - -telemetry { - prometheus_retention_time = "24h" - disable_hostname = true -} -enable_raw_endpoint = true - -storage "%s" { -} - -ui = true -` - - hclStr = fmt.Sprintf(hclStr, certDir, certDir, storageType) - parsed, err := ParseConfig(hclStr, "") - if err != nil { - return nil, err - } - - return parsed, nil -} - // Storage is the underlying storage configuration for the server. 
type Storage struct { Type string @@ -864,25 +802,11 @@ func parseHAStorage(result *Config, list *ast.ObjectList, name string) error { key = item.Keys[0].Token.Value().(string) } - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) } - m := make(map[string]string) - for key, val := range config { - valStr, ok := val.(string) - if ok { - m[key] = valStr - continue - } - valBytes, err := json.Marshal(val) - if err != nil { - return err - } - m[key] = string(valBytes) - } - // Pull out the redirect address since it's common to all backends var redirectAddr string if v, ok := m["redirect_addr"]; ok { diff --git a/command/server/config_test.go b/command/server/config_test.go index e8e80cc99b3d4..d246087032bd2 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -48,10 +48,6 @@ func TestParseSeals(t *testing.T) { testParseSeals(t) } -func TestParseStorage(t *testing.T) { - testParseStorageTemplate(t) -} - func TestUnknownFieldValidation(t *testing.T) { testUnknownFieldValidation(t) } diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index c459198263fae..4cde9b115668b 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -900,49 +900,6 @@ EOF } } -func testParseStorageTemplate(t *testing.T) { - config, err := ParseConfig(` -storage "consul" { - - disable_registration = false - path = "tmp/" - -} -ha_storage "consul" { - tls_skip_verify = true - scheme = "http" - max_parallel = 128 -} - -`, "") - if err != nil { - t.Fatal(err) - } - - expected := &Config{ - Storage: &Storage{ - Type: "consul", - Config: map[string]string{ - "disable_registration": "false", - "path": "tmp/", - }, - }, - HAStorage: &Storage{ - Type: "consul", - Config: map[string]string{ - "tls_skip_verify": 
"true", - "scheme": "http", - "max_parallel": "128", - }, - }, - SharedConfig: &configutil.SharedConfig{}, - } - config.Prune() - if diff := deep.Equal(config, expected); diff != nil { - t.Fatal(diff) - } -} - func testParseSeals(t *testing.T) { config, err := LoadConfigFile("./test-fixtures/config_seals.hcl") if err != nil { @@ -1056,7 +1013,6 @@ func testLoadConfigFileLeaseMetrics(t *testing.T) { Config: map[string]string{ "bar": "baz", }, - DisableClustering: true, }, diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go index 3f13aee938c01..e7d4d00e5d6b0 100644 --- a/command/server/server_seal_transit_acc_test.go +++ b/command/server/server_seal_transit_acc_test.go @@ -26,7 +26,7 @@ func TestTransitWrapper_Lifecycle(t *testing.T) { "key_name": config.keyName, } - kms, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + kms, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig}) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } @@ -72,7 +72,7 @@ func TestTransitSeal_TokenRenewal(t *testing.T) { "mount_path": config.mountPath, "key_name": config.keyName, } - kms, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + kms, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig}) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } diff --git a/command/server/tls_util.go b/command/server/tls_util.go deleted file mode 100644 index d327006332511..0000000000000 --- a/command/server/tls_util.go +++ /dev/null @@ -1,162 +0,0 @@ -package server - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "net" - "os" - "time" - - "github.com/hashicorp/vault/sdk/helper/certutil" -) - -type CaCert struct { - PEM string - Template *x509.Certificate - Signer crypto.Signer -} - -// 
GenerateCert creates a new leaf cert from provided CA template and signer -func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (string, string, error) { - // Create the private key - signer, keyPEM, err := privateKey() - if err != nil { - return "", "", fmt.Errorf("error generating private key for server certificate: %v", err) - } - - // The serial number for the cert - sn, err := serialNumber() - if err != nil { - return "", "", fmt.Errorf("error generating serial number: %v", err) - } - - signerKeyId, err := certutil.GetSubjKeyID(signer) - if err != nil { - return "", "", fmt.Errorf("error getting subject key id from key: %v", err) - } - - hostname, err := os.Hostname() - if err != nil { - return "", "", fmt.Errorf("error getting hostname: %v", err) - } - - if hostname == "" { - hostname = "localhost" - } - - // Create the leaf cert - template := x509.Certificate{ - SerialNumber: sn, - Subject: pkix.Name{CommonName: hostname}, - KeyUsage: x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - NotAfter: time.Now().Add(365 * 24 * time.Hour), - NotBefore: time.Now().Add(-1 * time.Minute), - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - DNSNames: []string{"localhost", "localhost4", "localhost6", "localhost.localdomain"}, - AuthorityKeyId: caCertTemplate.AuthorityKeyId, - SubjectKeyId: signerKeyId, - } - - bs, err := x509.CreateCertificate( - rand.Reader, &template, caCertTemplate, signer.Public(), caSigner) - if err != nil { - return "", "", fmt.Errorf("error creating server certificate: %v", err) - } - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return "", "", fmt.Errorf("error encoding server certificate: %v", err) - } - - return buf.String(), keyPEM, nil -} - -// GenerateCA generates a new self-signed CA cert and returns a -// CaCert struct containing the PEM encoded cert, -// X509 Certificate Template, 
and crypto.Signer -func GenerateCA() (*CaCert, error) { - // Create the private key we'll use for this CA cert. - signer, _, err := privateKey() - if err != nil { - return nil, fmt.Errorf("error generating private key for CA: %v", err) - } - - signerKeyId, err := certutil.GetSubjKeyID(signer) - if err != nil { - return nil, fmt.Errorf("error getting subject key id from key: %v", err) - } - - // The serial number for the cert - sn, err := serialNumber() - if err != nil { - return nil, fmt.Errorf("error generating serial number: %v", err) - } - - // Create the CA cert - template := x509.Certificate{ - SerialNumber: sn, - Subject: pkix.Name{CommonName: "Vault Dev CA"}, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now().Add(-1 * time.Minute), - AuthorityKeyId: signerKeyId, - SubjectKeyId: signerKeyId, - IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, - } - - bs, err := x509.CreateCertificate( - rand.Reader, &template, &template, signer.Public(), signer) - if err != nil { - return nil, fmt.Errorf("error creating CA certificate: %v", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return nil, fmt.Errorf("error encoding CA certificate: %v", err) - } - return &CaCert{ - PEM: buf.String(), - Template: &template, - Signer: signer, - }, nil -} - -// privateKey returns a new ECDSA-based private key. Both a crypto.Signer -// and the key in PEM format are returned. 
-func privateKey() (crypto.Signer, string, error) { - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, "", err - } - - bs, err := x509.MarshalECPrivateKey(pk) - if err != nil { - return nil, "", err - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) - if err != nil { - return nil, "", err - } - - return pk, buf.String(), nil -} - -// serialNumber generates a new random serial number. -func serialNumber() (*big.Int, error) { - return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) -} diff --git a/command/server_test.go b/command/server_test.go index e19b499e255ff..d836b11047699 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -121,7 +121,7 @@ func TestServer_ReloadListener(t *testing.T) { inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key") ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) - relhcl := strings.ReplaceAll(reloadHCL, "TMPDIR", td) + relhcl := strings.Replace(reloadHCL, "TMPDIR", td, -1) ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem") @@ -172,7 +172,7 @@ func TestServer_ReloadListener(t *testing.T) { t.Fatalf("certificate name didn't check out: %s", err) } - relhcl = strings.ReplaceAll(reloadHCL, "TMPDIR", td) + relhcl = strings.Replace(reloadHCL, "TMPDIR", td, -1) inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem") ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") diff --git a/command/server_util.go b/command/server_util.go index 1959f17662569..4054693be1e4d 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -5,10 +5,7 @@ import ( "github.com/hashicorp/vault/vault" ) -var ( - adjustCoreConfigForEnt = adjustCoreConfigForEntNoop - checkStorageTypeForEnt = checkStorageTypeForEntNoop -) +var adjustCoreConfigForEnt = adjustCoreConfigForEntNoop func adjustCoreConfigForEntNoop(config 
*server.Config, coreConfig *vault.CoreConfig) { } @@ -18,7 +15,3 @@ var getFIPSInfoKey = getFIPSInfoKeyNoop func getFIPSInfoKeyNoop() string { return "" } - -func checkStorageTypeForEntNoop(coreConfig *vault.CoreConfig) error { - return nil -} diff --git a/command/token/helper_external.go b/command/token/helper_external.go index 83f5f8907291d..102bc1cff4058 100644 --- a/command/token/helper_external.go +++ b/command/token/helper_external.go @@ -10,7 +10,6 @@ import ( "strings" ) -// ExternalTokenHelperPath should only be used in dev mode. // ExternalTokenHelperPath takes the configured path to a helper and expands it to // a full absolute path that can be executed. As of 0.5, the default token // helper is internal, to avoid problems running in dev mode (see GH-850 and @@ -39,17 +38,15 @@ func ExternalTokenHelperPath(path string) (string, error) { var _ TokenHelper = (*ExternalTokenHelper)(nil) -// ExternalTokenHelper should only be used in a dev mode. For all other cases, -// InternalTokenHelper should be used. // ExternalTokenHelper is the struct that has all the logic for storing and retrieving // tokens from the token helper. The API for the helpers is simple: the // BinaryPath is executed within a shell with environment Env. The last argument // appended will be the operation, which is: // -// - "get" - Read the value of the token and write it to stdout. -// - "store" - Store the value of the token which is on stdin. Output -// nothing. -// - "erase" - Erase the contents stored. Output nothing. +// * "get" - Read the value of the token and write it to stdout. +// * "store" - Store the value of the token which is on stdin. Output +// nothing. +// * "erase" - Erase the contents stored. Output nothing. // // Any errors can be written on stdout. If the helper exits with a non-zero // exit code then the stderr will be made part of the error value. 
@@ -106,7 +103,7 @@ func (h *ExternalTokenHelper) Path() string { } func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) { - script := strings.ReplaceAll(h.BinaryPath, "\\", "\\\\") + " " + op + script := strings.Replace(h.BinaryPath, "\\", "\\\\", -1) + " " + op cmd, err := ExecScript(script) if err != nil { return nil, err diff --git a/command/util.go b/command/util.go index 4f9614e234fb3..def0a1e05857c 100644 --- a/command/util.go +++ b/command/util.go @@ -14,7 +14,6 @@ import ( ) // DefaultTokenHelper returns the token helper that is configured for Vault. -// This helper should only be used for non-server CLI commands. func DefaultTokenHelper() (token.TokenHelper, error) { return config.DefaultTokenHelper() } diff --git a/enos/Makefile b/enos/Makefile deleted file mode 100644 index ad27fb0ffbe92..0000000000000 --- a/enos/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -.PHONY: default -default: check-fmt - -.PHONY: check-fmt -check-fmt: check-fmt-enos check-fmt-modules - -.PHONY: fmt -fmt: fmt-enos fmt-modules - -.PHONY: check-fmt-enos -check-fmt-enos: - enos fmt --check --diff . - -.PHONY: fmt-enos -fmt-enos: - enos fmt . - -.PHONY: check-fmt-modules -check-fmt-modules: - terraform fmt -check -diff -recursive ./modules - -.PHONY: fmt-modules -fmt-modules: - terraform fmt -diff -recursive ./modules diff --git a/enos/README.md b/enos/README.md deleted file mode 100644 index 7e165b0df8a0d..0000000000000 --- a/enos/README.md +++ /dev/null @@ -1,142 +0,0 @@ -# Enos - -Enos is an quality testing framework that allows composing and executing quality -requirement scenarios as code. For Vault, it is currently used to perform -infrastructure integration testing using the artifacts that are created as part -of the `build` workflow. While intended to be executed via Github Actions using -the results of the `build` workflow, scenarios are also executable from a developer -machine that has the requisite dependencies and configuration. 
- -Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) -for further information regarding installation, execution or composing Enos scenarios. - -## When to use Enos -Determining whether to use `vault.NewTestCluster()` or Enos for testing a feature -or scenario is ultimately up to the author. Sometimes one, the other, or both -might be appropriate depending on the requirements. Generally, `vault.NewTestCluster()` -is going to give you faster feedback and execution time, whereas Enos is going -to give you a real-world execution and validation of the requirement. Consider -the following cases as examples of when one might opt for an Enos scenario: - -* The feature require third-party integrations. Whether that be networked - dependencies like a real Consul backend, a real KMS key to test awskms - auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's. -* The feature might behave differently under multiple configuration variants - and therefore should be tested with both combinations, e.g. auto-unseal and - manual shamir unseal or replication in HA mode with integrated storage or - Consul storage. -* The scenario requires coordination between multiple targets. For example, - consider the complex lifecycle event of migrating the seal type or storage, - or manually triggering a raft disaster scenario by partitioning the network - between the leader and follower nodes. Or perhaps an auto-pilot upgrade between - a stable version of Vault and our candidate version. -* The scenario has specific deployment strategy requirements. For example, - if we want to add a regression test for an issue that only arises when the - software is deployed in a certain manner. -* The scenario needs to use actual build artifacts that will be promoted - through the pipeline. - -## Requirements -* AWS access. HashiCorp Vault developers should use Doormat. -* Terraform >= 1.2 -* Enos >= v0.0.10. 
You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md). -* Access to the QTI org in Terraform Cloud. HashiCorp Vault developers can - access a shared token in 1Password or request their own in #team-quality on - Slack. -* An SSH keypair in the AWS region you wish to run the scenario. You can use - Doormat to log in to the AWS console to create or upload an existing keypair. -* A Vault install bundle downloaded from releases.hashicorp.com or Artifactory - when using the `builder:crt` variants. When using the `builder:local` variants - Enos will build a Vault bundle from the current branch for you. - -## Scenario Variables -In CI, each scenario is executed via Github Actions and has been configured using -environment variable inputs that follow the `ENOS_VAR_varname` pattern. - -For local execution you can specify all the required variables using environment -variables, or you can update `enos.vars.hcl` with values and uncomment the lines. - -Variables that are required: -* `aws_ssh_keypair_name` -* `aws_ssh_private_key_path` -* `tfc_api_token` -* `vault_bundle_path` -* `vault_license_path` (only required for non-OSS editions) - -See [enos.vars.hcl](./enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl) -for further descriptions of the variables. - -## Executing Scenarios -From the `enos` directory: - -```bash -# List all available scenarios -enos scenario list -# Run the smoke or upgrade scenario with an artifact that is built locally. Make sure -# the local machine has been configured as detailed in the requirements -# section. This will execute the scenario and clean up any resources if successful. -enos scenario run smoke builder:local -enos scenario run upgrade builder:local -# To run the same scenario variants that are run in CI, refer to the scenarios listed -# in .github/workflows/enos-run.yml under `jobs.enos.strategy.matrix.include`, -# adding `builder:local` to run locally. 
-enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms builder:local arch:amd64 edition:oss -# Launch an individual scenario but leave infrastructure up after execution -enos scenario launch smoke builder:local -# Check an individual scenario for validity. This is useful during scenario -# authoring and debugging. -enos scenario validate smoke builder:local -# If you've run the tests and desire to see the outputs, such as the URL or -# credentials, you can run the output command to see them. Please note that -# after "run" or destroy there will be no "outputs" as the infrastructure -# will have been destroyed and state cleared. -enos scenario output smoke builder:local -# Explicitly destroy all existing infrastructure -enos scenario destroy smoke builder:local -``` - -Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) -for further information regarding installation, execution or composing scenarios. - -# Scenarios -There are current two scenarios: `smoke` and `upgrade`. Both begin by building Vault -as specified by the selected `builder` variant (see Variants section below for more -information). - -## Smoke -The [`smoke` scenario](./enos-scenario-smoke.hcl) creates a Vault cluster using -the version from the current branch (either in CI or locally), with the backend -specified by the `backend` variant (`raft` or `consul`). Next, it unseals with the -appropriate method (`awskms` or `shamir`) and performs different verifications -depending on the backend and seal type. - -## Upgrade -The [`upgrade` scenario](./enos-scenario-upgrade.hcl) creates a Vault cluster using -the version specified in `vault_upgrade_initial_release`, with the backend specified -by the `backend` variant (`raft` or `consul`). Next, it upgrades the Vault binary -that is determined by the `builder` variant. After the upgrade, it verifies that -cluster is at the desired version, along with additional verifications. 
- - -## Autopilot -The [`autopilot` scenario](./enos-scenario-autopilot.hcl) creates a Vault cluster using -the version specified in `vault_upgrade_initial_release`. Next, it creates additional -nodes with the candiate version of Vault as determined by the `builder` variant. -The module uses AWS auto-join to handle discovery and unseals with auto-unseal -or Shamir depending on the `seal` variant. After the new nodes have joined and been -unsealed, it waits for Autopilot to upgrade the new nodes and demote the old nodes. - -# Variants -Both scenarios support a matrix of variants. In order to achieve broad coverage while -keeping test run time reasonable, the variants executed by the `enos-run` Github -Actions are tailored to maximize variant distribution per scenario. - -## `builder:crt` -This variant is designed for use in Github Actions. The `enos-run.yml` workflow -downloads the artifact built by the `build.yml` workflow, unzips it, and sets the -`vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary. - -## `builder:local` -This variant is for running the Enos scenario locally. It builds the Vault bundle -from the current branch, placing the bundle at the `vault_bundle_path` and the -unzipped Vault binary at the `vault_local_binary_path`. diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl deleted file mode 100644 index 5c4b8a3e7b438..0000000000000 --- a/enos/enos-modules.hcl +++ /dev/null @@ -1,96 +0,0 @@ -module "autopilot_upgrade_storageconfig" { - source = "./modules/autopilot_upgrade_storageconfig" -} - -module "az_finder" { - source = "./modules/az_finder" -} - -module "backend_consul" { - source = "app.terraform.io/hashicorp-qti/aws-consul/enos" - - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ssh_aws_keypair = var.aws_ssh_keypair_name - - # Set this to a real license vault if using an Enterprise edition of Consul - consul_license = var.backend_license_path == null ? 
"none" : file(abspath(var.backend_license_path)) -} - -module "backend_raft" { - source = "./modules/backend_raft" -} - -module "build_crt" { - source = "./modules/build_crt" -} - -module "build_local" { - source = "./modules/build_local" -} - -module "create_vpc" { - source = "app.terraform.io/hashicorp-qti/aws-infra/enos" - - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ami_architectures = ["amd64", "arm64"] -} - -module "get_local_version_from_make" { - source = "./modules/get_local_version_from_make" -} - -module "read_license" { - source = "./modules/read_license" -} - -module "vault_cluster" { - source = "app.terraform.io/hashicorp-qti/aws-vault/enos" - # source = "../../terraform-enos-aws-vault" - - common_tags = var.tags - environment = "ci" - instance_count = var.vault_instance_count - project_name = var.project_name - ssh_aws_keypair = var.aws_ssh_keypair_name - vault_install_dir = var.vault_install_dir -} - -module "vault_upgrade" { - source = "./modules/vault_upgrade" - - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count -} - -module "vault_verify_autopilot" { - source = "./modules/vault_verify_autopilot" - - vault_autopilot_upgrade_status = "await-server-removal" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count -} - -module "vault_verify_raft_auto_join_voter" { - source = "./modules/vault_verify_raft_auto_join_voter" - - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count -} - -module "vault_verify_unsealed" { - source = "./modules/vault_verify_unsealed" - - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count -} - -module "vault_verify_version" { - source = "./modules/vault_verify_version" - - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count -} diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl 
deleted file mode 100644 index 9301b55037d06..0000000000000 --- a/enos/enos-providers.hcl +++ /dev/null @@ -1,21 +0,0 @@ -provider "aws" "default" { - region = var.aws_region -} - -provider "enos" "rhel" { - transport = { - ssh = { - user = "ec2-user" - private_key_path = abspath(var.aws_ssh_private_key_path) - } - } -} - -provider "enos" "ubuntu" { - transport = { - ssh = { - user = "ubuntu" - private_key_path = abspath(var.aws_ssh_private_key_path) - } - } -} diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl deleted file mode 100644 index 4e744ba20852d..0000000000000 --- a/enos/enos-scenario-autopilot.hcl +++ /dev/null @@ -1,252 +0,0 @@ -scenario "autopilot" { - matrix { - arch = ["amd64", "arm64"] - builder = ["local", "crt"] - distro = ["ubuntu", "rhel"] - edition = ["ent"] - seal = ["awskms", "shamir"] - } - - terraform_cli = terraform_cli.default - terraform = terraform.default - providers = [ - provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel - ] - - locals { - build_tags = { - "ent" = ["enterprise", "ent"] - } - bundle_path = abspath(var.vault_bundle_path) - dependencies_to_install = ["jq"] - enos_provider = { - rhel = provider.enos.rhel - ubuntu = provider.enos.ubuntu - } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - } - - step "build_vault" { - module = matrix.builder == "crt" ? module.build_crt : module.build_local - - variables { - build_tags = var.vault_local_build_tags != null ? 
var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - } - } - - step "find_azs" { - module = module.az_finder - - variables { - instance_type = [ - local.vault_instance_type - ] - } - } - - step "create_vpc" { - module = module.create_vpc - - variables { - ami_architectures = [matrix.arch] - } - } - - step "read_license" { - module = module.read_license - - variables { - file_name = abspath(joinpath(path.root, "./support/vault.hclic")) - } - } - - step "create_vault_cluster" { - module = module.vault_cluster - depends_on = [ - step.create_vpc, - step.build_vault, - ] - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - storage_backend_addl_config = { - autopilot_upgrade_version = var.vault_autopilot_initial_release.version - } - unseal_method = matrix.seal - vault_release = var.vault_autopilot_initial_release - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - } - } - - step "get_local_version" { - module = module.get_local_version_from_make - } - - step "create_autopilot_upgrade_storageconfig" { - module = module.autopilot_upgrade_storageconfig - depends_on = [step.get_local_version] - - variables { - vault_product_version = step.get_local_version.version - } - } - - step "upgrade_vault_cluster_with_autopilot" { - module = module.vault_cluster - depends_on = [ - step.create_vault_cluster, - step.create_autopilot_upgrade_storageconfig, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - 
instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config - unseal_method = matrix.seal - vault_cluster_tag = step.create_vault_cluster.vault_cluster_tag - vault_init = false - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_node_prefix = "upgrade_node" - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_when_no_init = matrix.seal == "shamir" - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - } - } - - step "verify_autopilot_upgraded_vault_cluster" { - module = module.vault_verify_autopilot - depends_on = [step.upgrade_vault_cluster_with_autopilot] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_autopilot_upgrade_version = step.get_local_version.version - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - step "verify_vault_unsealed" { - module = module.vault_verify_unsealed - depends_on = [ - step.create_vault_cluster, - step.upgrade_vault_cluster_with_autopilot, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - step "verify_raft_auto_join_voter" { - module = module.vault_verify_raft_auto_join_voter - depends_on = [ - step.create_vault_cluster, - step.upgrade_vault_cluster_with_autopilot, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - output "vault_cluster_instance_ids" { - description 
= "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids - } - - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips - } - - output "vault_cluster_priv_ips" { - description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips - } - - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id - } - - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token - } - - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 - } - - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex - } - - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag - } - - output "upgraded_vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.upgrade_vault_cluster_with_autopilot.instance_ids - } - - output "upgraded_vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_public_ips - } - - output "upgraded_vault_cluster_priv_ips" { - description = "The Vault cluster private IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_private_ips - } -} diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl deleted file mode 100644 index f14796c23896f..0000000000000 --- a/enos/enos-scenario-smoke.hcl +++ /dev/null @@ -1,206 +0,0 @@ -scenario "smoke" { - matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - builder = ["local", "crt"] - consul_version = ["1.12.3", "1.11.7", "1.10.12"] - distro = ["ubuntu", 
"rhel"] - edition = ["oss", "ent"] - seal = ["awskms", "shamir"] - } - - terraform_cli = terraform_cli.default - terraform = terraform.default - providers = [ - provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel - ] - - locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["enterprise", "ent"] - } - bundle_path = abspath(var.vault_bundle_path) - dependencies_to_install = ["jq"] - enos_provider = { - rhel = provider.enos.rhel - ubuntu = provider.enos.ubuntu - } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - } - - step "build_vault" { - module = matrix.builder == "crt" ? module.build_crt : module.build_local - - variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - } - } - - step "find_azs" { - module = module.az_finder - - variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] - } - } - - step "create_vpc" { - module = module.create_vpc - - variables { - ami_architectures = [matrix.arch] - availability_zones = step.find_azs.availability_zones - common_tags = local.tags - } - } - - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license - - variables { - file_name = abspath(joinpath(path.root, "./support/vault.hclic")) - } - } - - step "create_backend_cluster" { - module = "backend_${matrix.backend}" - depends_on = [ - step.create_vpc, - step.build_vault, - ] - - providers = { - enos = provider.enos.ubuntu - } - - variables { - ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch] - common_tags = local.tags - consul_release = { - edition = var.backend_edition - version = matrix.consul_version - 
} - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id - } - } - - step "create_vault_cluster" { - module = module.vault_cluster - depends_on = [ - step.create_vpc, - step.create_backend_cluster, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_local_artifact_path = local.bundle_path - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - } - } - - step "verify_vault_unsealed" { - module = module.vault_verify_unsealed - depends_on = [ - step.create_vault_cluster, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [ - step.create_vault_cluster, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids - } - - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips - } - - output "vault_cluster_priv_ips" { - description = "The Vault cluster private 
IPs" - value = step.create_vault_cluster.instance_private_ips - } - - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id - } - - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token - } - - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 - } - - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex - } - - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag - } -} diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl deleted file mode 100644 index 4193b06fee62d..0000000000000 --- a/enos/enos-scenario-upgrade.hcl +++ /dev/null @@ -1,243 +0,0 @@ -scenario "upgrade" { - matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - builder = ["local", "crt"] - consul_version = ["1.12.3", "1.11.7", "1.10.12"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent"] - seal = ["awskms", "shamir"] - } - - terraform_cli = terraform_cli.default - terraform = terraform.default - providers = [ - provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel - ] - - locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["enterprise", "ent"] - } - bundle_path = abspath(var.vault_bundle_path) - dependencies_to_install = ["jq"] - enos_provider = { - rhel = provider.enos.rhel - ubuntu = provider.enos.ubuntu - } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - } - - step "build_vault" { - module = 
matrix.builder == "crt" ? module.build_crt : module.build_local - - variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - } - } - - step "find_azs" { - module = module.az_finder - - variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type, - ] - } - } - - step "create_vpc" { - module = module.create_vpc - - variables { - ami_architectures = [matrix.arch] - availability_zones = step.find_azs.availability_zones - common_tags = local.tags - } - } - - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license - - variables { - file_name = abspath(joinpath(path.root, "./support/vault.hclic")) - } - } - - step "create_backend_cluster" { - module = "backend_${matrix.backend}" - depends_on = [ - step.create_vpc, - step.build_vault, - ] - - providers = { - enos = provider.enos.ubuntu - } - - variables { - ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch] - common_tags = local.tags - consul_release = { - edition = var.backend_edition - version = matrix.consul_version - } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id - } - } - - step "create_vault_cluster" { - module = module.vault_cluster - depends_on = [ - step.create_vpc, - step.create_backend_cluster, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_release = var.vault_upgrade_initial_release - vault_license = matrix.edition 
!= "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - } - } - - step "upgrade_vault" { - module = module.vault_upgrade - depends_on = [ - step.create_vault_cluster, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_api_addr = "http://localhost:8200" - vault_instances = step.create_vault_cluster.vault_instances - vault_local_bundle_path = local.bundle_path - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null - vault_seal_type = matrix.seal - } - } - - step "verify_vault_version" { - module = module.vault_verify_version - depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - } - } - - step "verify_vault_unsealed" { - module = module.vault_verify_unsealed - depends_on = [ - step.create_vault_cluster, - step.upgrade_vault, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids - } - - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips - } - - output "vault_cluster_priv_ips" { - description = 
"The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips - } - - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id - } - - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token - } - - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 - } - - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex - } - - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag - } -} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl deleted file mode 100644 index 0c88cc7910f07..0000000000000 --- a/enos/enos-terraform.hcl +++ /dev/null @@ -1,30 +0,0 @@ -terraform_cli "default" { - plugin_cache_dir = var.terraform_plugin_cache_dir != null ? 
abspath(var.terraform_plugin_cache_dir) : null - - credentials "app.terraform.io" { - token = var.tfc_api_token - } - - /* - provider_installation { - dev_overrides = { - "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider") - } - direct {} - } - */ -} - -terraform "default" { - required_version = ">= 1.2.0" - - required_providers { - aws = { - source = "hashicorp/aws" - } - - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl deleted file mode 100644 index f62dfc12d669f..0000000000000 --- a/enos/enos-variables.hcl +++ /dev/null @@ -1,111 +0,0 @@ -variable "aws_region" { - description = "The AWS region where we'll create infrastructure" - type = string - default = "us-west-1" -} - -variable "aws_ssh_keypair_name" { - description = "The AWS keypair to use for SSH" - type = string - default = "enos-ci-ssh-key" -} - -variable "aws_ssh_private_key_path" { - description = "The path to the AWS keypair private key" - type = string - default = "./support/private_key.pem" -} - -variable "backend_edition" { - description = "The backend release edition if applicable" - type = string - default = "oss" -} - -variable "backend_instance_type" { - description = "The instance type to use for the Vault backend" - type = string - default = "t3.small" -} - -variable "backend_license_path" { - description = "The license for the backend if applicable (Consul Enterprise)" - type = string - default = null -} - -variable "project_name" { - description = "The description of the project" - type = string - default = "vault-enos-integration" -} - -variable "tags" { - description = "Tags that will be applied to infrastructure resources that support tagging" - type = map(string) - default = null -} - -variable "terraform_plugin_cache_dir" { - description = "The directory to cache Terraform modules and providers" - type = string - default = null -} - -variable "tfc_api_token" { - description = "The 
Terraform Cloud QTI Organization API token." - type = string -} - -variable "vault_autopilot_initial_release" { - description = "The Vault release to deploy before upgrading with autopilot" - default = { - edition = "ent" - version = "1.11.0" - } -} - -variable "vault_bundle_path" { - description = "Path to CRT generated or local vault.zip bundle" - type = string - default = "/tmp/vault.zip" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the vault binary will be installed" - default = "/opt/vault/bin" -} - -variable "vault_instance_type" { - description = "The instance type to use for the Vault backend" - type = string - default = null -} - -variable "vault_instance_count" { - description = "How many instances to create for the Vault cluster" - type = number - default = 3 -} - -variable "vault_license_path" { - description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions" - type = string - default = null -} - -variable "vault_local_build_tags" { - description = "The build tags to pass to the Go compiler for builder:local variants" - type = list(string) - default = null -} - -variable "vault_upgrade_initial_release" { - description = "The Vault release to deploy before upgrading" - default = { - edition = "oss" - // vault 1.10.5 has a known issue with retry_join. 
- version = "1.10.4" - } -} diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl deleted file mode 100644 index d157c7b92650e..0000000000000 --- a/enos/enos.vars.hcl +++ /dev/null @@ -1,48 +0,0 @@ -# aws_region is the AWS region where we'll create infrastructure -# for the smoke scenario -# aws_region = "us-west-1" - -# aws_ssh_keypair_name is the AWS keypair to use for SSH -# aws_ssh_keypair_name = "enos-ci-ssh-key" - -# aws_ssh_private_key_path is the path to the AWS keypair private key -# aws_ssh_private_key_path = "./support/private_key.pem" - -# backend_instance_type is the instance type to use for the Vault backend -# backend_instance_type = "t3.small" - -# tags are a map of tags that will be applied to infrastructure resources that -# support tagging. -# tags = { "Project Name" : "Vault", "Something Cool" : "Value" } - -# terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. -# It must exist. -# terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir - -# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this -# to download the enos Terraform provider and the enos Terraform modules. -# tfc_api_token = "XXXXX.atlasv1.XXXXX..." - -# vault_bundle_path is the path to CRT generated or local vault.zip bundle. When -# using the "builder:local" variant a bundle will be built from the current branch. -# In CI it will use the output of the build workflow. -# vault_bundle_path = "./dist/vault.zip" - -# vault_install_dir is the directory where the vault binary will be installed on -# the remote machines. -# vault_install_dir = "/opt/vault/bin" - -# vault_local_binary_path is the path of the local binary that we're upgrading to. -# vault_local_binary_path = "./support/vault" - -# vault_instance_type is the instance type to use for the Vault backend -# vault_instance_type = "t3.small" - -# vault_instance_count is how many instances to create for the Vault cluster. 
-# vault_instance_count = 3 - -# vault_license_path is the path to a valid Vault enterprise edition license. -# This is only required for non-oss editions" -# vault_license_path = "./support/vault.hclic" - -# vault_upgrade_initial_release is the Vault release to deploy before upgrading. diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf deleted file mode 100644 index 6093b8b1066de..0000000000000 --- a/enos/modules/autopilot_upgrade_storageconfig/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "vault_product_version" {} - -output "storage_addl_config" { - value = { - autopilot_upgrade_version = var.vault_product_version - } -} diff --git a/enos/modules/az_finder/main.tf b/enos/modules/az_finder/main.tf deleted file mode 100644 index b55975578c61c..0000000000000 --- a/enos/modules/az_finder/main.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} - -variable "instance_type" { - default = ["t3.small"] - type = list(string) -} - -data "aws_ec2_instance_type_offerings" "infra" { - filter { - name = "instance-type" - values = var.instance_type - } - - location_type = "availability-zone" -} - -output "availability_zones" { - value = data.aws_ec2_instance_type_offerings.infra.locations -} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf deleted file mode 100644 index 4cb8e58a592bf..0000000000000 --- a/enos/modules/backend_raft/main.tf +++ /dev/null @@ -1,46 +0,0 @@ -// Shim module to handle the fact that Vault doesn't actually need a backend module -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "ami_id" { - default = null -} -variable "common_tags" { - default = null -} -variable "consul_license" { - default = null -} -variable "consul_release" { - default = null -} -variable "environment" { - 
default = null -} -variable "instance_type" { - default = null -} -variable "kms_key_arn" { - default = null -} -variable "project_name" { - default = null -} -variable "ssh_aws_keypair" { - default = null -} -variable "vpc_id" { - default = null -} - -output "consul_cluster_tag" { - value = null -} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf deleted file mode 100644 index 6e54287d2845c..0000000000000 --- a/enos/modules/build_crt/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -# Shim module since CRT provided things will use the crt_bundle_path variable -variable "bundle_path" { - default = "/tmp/vault.zip" -} - -variable "build_tags" { - default = ["ui"] -} - -variable "goarch" { - type = string - description = "The Go architecture target" - default = "amd64" -} - -variable "goos" { - type = string - description = "The Go OS target" - default = "linux" -} diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf deleted file mode 100644 index 246a4ed2d9e88..0000000000000 --- a/enos/modules/build_local/main.tf +++ /dev/null @@ -1,30 +0,0 @@ -variable "bundle_path" { - type = string - default = "/tmp/vault.zip" -} - -variable "build_tags" { - type = list(string) - description = "The build tags to pass to the Go compiler" -} - -variable "goarch" { - type = string - description = "The Go architecture target" - default = "amd64" -} - -variable "goos" { - type = string - description = "The Go OS target" - default = "linux" -} - -resource "enos_local_exec" "build" { - content = templatefile("${path.module}/templates/build.sh", { - bundle_path = var.bundle_path, - build_tags = join(" ", var.build_tags) - goarch = var.goarch - goos = var.goos - }) -} diff --git a/enos/modules/build_local/templates/build.sh b/enos/modules/build_local/templates/build.sh deleted file mode 100755 index 661df529bf313..0000000000000 --- a/enos/modules/build_local/templates/build.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eux -o pipefail - 
-# Requirements -npm install --global yarn || true - -# Set up the environment for building Vault. -root_dir="$(git rev-parse --show-toplevel)" - -pushd "$root_dir" > /dev/null - -export GO_TAGS=${build_tags} -export CGO_ENABLED=0 - -IFS="-" read -r BASE_VERSION _other <<< "$(make version)" -export VAULT_VERSION=$BASE_VERSION - -build_date="$(make build-date)" -export VAULT_BUILD_DATE=$build_date - -revision="$(git rev-parse HEAD)" -export VAULT_REVISION=$revision -popd > /dev/null - -# Go to the UI directory of the Vault repo and build the UI -pushd "$root_dir/ui" > /dev/null -yarn install --ignore-optional -npm rebuild node-sass -yarn --verbose run build -popd > /dev/null - -# Build for linux/amd64 and create a bundle since we're deploying it to linux/amd64 -pushd "$root_dir" > /dev/null -export GOARCH=${goarch} -export GOOS=${goos} -make build - -zip -r -j ${bundle_path} dist/ -popd > /dev/null diff --git a/enos/modules/get_local_version_from_make/main.tf b/enos/modules/get_local_version_from_make/main.tf deleted file mode 100644 index a7dcf6be7f2ab..0000000000000 --- a/enos/modules/get_local_version_from_make/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -resource "enos_local_exec" "get_version" { - scripts = ["${path.module}/scripts/version.sh"] -} - -output "version" { - value = trimspace(enos_local_exec.get_version.stdout) -} diff --git a/enos/modules/get_local_version_from_make/scripts/version.sh b/enos/modules/get_local_version_from_make/scripts/version.sh deleted file mode 100755 index 13970ead9752c..0000000000000 --- a/enos/modules/get_local_version_from_make/scripts/version.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/env bash -set -eu -o pipefail - -# Set up the environment for building Vault. 
-root_dir="$(git rev-parse --show-toplevel)" - -pushd "$root_dir" > /dev/null - -IFS="-" read -r VAULT_VERSION _other <<< "$(make version)" -echo $VAULT_VERSION diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf deleted file mode 100644 index 1b645272abe7c..0000000000000 --- a/enos/modules/read_license/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "file_name" {} - -output "license" { - value = file(var.file_name) -} diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf deleted file mode 100644 index 375be70d63148..0000000000000 --- a/enos/modules/vault_upgrade/main.tf +++ /dev/null @@ -1,163 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_api_addr" { - type = string - description = "The API address of the Vault cluster" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_local_bundle_path" { - type = string - description = "The path to the local Vault (vault.zip) bundle" -} - -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "vault_unseal_keys" { - type = list(string) - description = "The keys to use to unseal Vault when not using auto-unseal" - default = null -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } - followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) 
- follower_ips = compact(split(" ", enos_remote_exec.get_follower_public_ips.stdout)) - vault_bin_path = "${var.vault_install_dir}/vault" -} - -resource "enos_bundle_install" "upgrade_vault_binary" { - for_each = local.instances - - destination = var.vault_install_dir - path = var.vault_local_bundle_path - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_remote_exec" "get_leader_public_ip" { - depends_on = [enos_bundle_install.upgrade_vault_binary] - - content = templatefile("${path.module}/templates/get-leader-public-ip.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) - - transport = { - ssh = { - host = local.instances[0].public_ip - } - } -} - -resource "enos_remote_exec" "get_follower_public_ips" { - depends_on = [enos_bundle_install.upgrade_vault_binary] - - content = templatefile("${path.module}/templates/get-follower-public-ips.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) - - transport = { - ssh = { - host = local.instances[0].public_ip - } - } -} - -resource "enos_remote_exec" "restart_followers" { - for_each = local.followers - depends_on = [enos_remote_exec.get_follower_public_ips] - - content = file("${path.module}/templates/restart-vault.sh") - - transport = { - ssh = { - host = trimspace(local.follower_ips[tonumber(each.key)]) - } - } -} - -resource "enos_vault_unseal" "followers" { - depends_on = [enos_remote_exec.restart_followers] - for_each = { - for idx, follower in local.followers : idx => follower - if var.vault_seal_type == "shamir" - } - bin_path = local.vault_bin_path - vault_addr = var.vault_api_addr - seal_type = var.vault_seal_type - unseal_keys = var.vault_unseal_keys - - transport = { - ssh = { - host = trimspace(local.follower_ips[each.key]) - } - } -} - -resource "enos_remote_exec" "restart_leader" { - depends_on = [enos_vault_unseal.followers] - - content = 
file("${path.module}/templates/restart-vault.sh") - - transport = { - ssh = { - host = trimspace(enos_remote_exec.get_leader_public_ip.stdout) - } - } -} - -resource "enos_vault_unseal" "leader" { - count = var.vault_seal_type == "shamir" ? 1 : 0 - depends_on = [enos_remote_exec.restart_leader] - - bin_path = local.vault_bin_path - vault_addr = var.vault_api_addr - seal_type = var.vault_seal_type - unseal_keys = var.vault_unseal_keys - - transport = { - ssh = { - host = trimspace(enos_remote_exec.get_leader_public_ip.stdout) - } - } -} diff --git a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh b/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh deleted file mode 100644 index e424aa44406ce..0000000000000 --- a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip addresses of the followers -follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") - -echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' ' diff --git a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh b/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh deleted file mode 100644 index 5c36dae336f5c..0000000000000 --- a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip address of the leader -leader_public=$(jq ".[] | select(.private_ip==$leader_address) | 
.public_ip" <<< "$instances") -echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/templates/restart-vault.sh b/enos/modules/vault_upgrade/templates/restart-vault.sh deleted file mode 100644 index aa68536430560..0000000000000 --- a/enos/modules/vault_upgrade/templates/restart-vault.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -eux - -sudo systemctl restart vault diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf deleted file mode 100644 index af9a673e626ff..0000000000000 --- a/enos/modules/vault_verify_autopilot/main.tf +++ /dev/null @@ -1,68 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "vault_autopilot_upgrade_version" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -variable "vault_autopilot_upgrade_status" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -locals { - public_ips = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "smoke-verify-autopilot" { - for_each = local.public_ips - - content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", { - vault_install_dir = var.vault_install_dir - 
vault_token = var.vault_root_token - vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status, - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version, - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh deleted file mode 100755 index 1dd5d90147100..0000000000000 --- a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -token="${vault_token}" -autopilot_version="${vault_autopilot_upgrade_version}" -autopilot_status="${vault_autopilot_upgrade_status}" - -export VAULT_ADDR="http://localhost:8200" -export VAULT_TOKEN="$token" - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=7 -while :; do - state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state) - status="$(jq -r '.data.upgrade_info.status' <<< "$state")" - target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" - - if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then - exit 0 - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Autopilot did not get into the correct status" - fi -done diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf deleted file mode 100644 index ded9c3cc7007f..0000000000000 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ /dev/null @@ -1,62 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" -} - -variable "vault_install_dir" { - type = string - description = "The 
directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "verify_raft_auto_join_voter" { - for_each = local.instances - - content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh deleted file mode 100644 index e1172d7158f61..0000000000000 --- a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 2>&1 - return 1 -} - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? 
- wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - echo "retry $count" - else - return "$exit" - fi - done - - return 0 -} - -check_voter_status() { - voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected') - - if [[ "$voter_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')" - fi -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# Retry a few times because it can take some time for things to settle after -# all the nodes are unsealed -retry 5 check_voter_status diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf deleted file mode 100644 index ead8d01818439..0000000000000 --- a/enos/modules/vault_verify_unsealed/main.tf +++ /dev/null @@ -1,62 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : 
idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "verify_raft_auto_join_voter" { - for_each = local.instances - - content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh deleted file mode 100644 index de3edd6482027..0000000000000 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') -if [[ "$unseal_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" -fi diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf deleted file mode 100644 index fddb765bf91d6..0000000000000 --- a/enos/modules/vault_verify_version/main.tf +++ /dev/null @@ -1,48 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description 
= "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { - for_each = local.instances - - content = templatefile("${path.module}/templates/verify-cluster-version.sh", { - vault_install_dir = var.vault_install_dir, - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh b/enos/modules/vault_verify_version/templates/verify-cluster-version.sh deleted file mode 100644 index 402ca82358b6b..0000000000000 --- a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -# The Vault smoke test to verify the Vault version installed - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - exit 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -binary_version_full=$($binpath version) -# Get the Vault build tag -binary_version=$(cut -d ' ' -f2 <<< $binary_version_full) -# Strip the leading v -semantic=$${binary_version:1} -# Get the build timestamp -build_date=$(cut -d ' ' -f5 <<< $binary_version_full) - -export VAULT_ADDR='http://127.0.0.1:8200' - -# Ensure that the cluster version and build time match the binary installed -vault_status=$("$binpath" status -format json) -result=$(jq -Mr \ - --arg version "$semantic" \ - --arg build_date "$build_date" \ - 'select(.version == $version) | .build_date == $build_date' \ - <<< $vault_status -) - -if [[ "$result" != "true" ]]; then - fail "expected version $binary_version with 
build_date $build_date, got status $vault_status" -fi diff --git a/go.mod b/go.mod index 5ece8c471a93f..cebb57a8bbd57 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/vault -go 1.19 +go 1.17 replace github.com/hashicorp/vault/api => ./api @@ -15,32 +15,30 @@ replace github.com/hashicorp/vault/sdk => ./sdk replace go.etcd.io/etcd/client/pkg/v3 v3.5.0 => go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 require ( - cloud.google.com/go/monitoring v1.2.0 + cloud.google.com/go v0.65.0 cloud.google.com/go/spanner v1.5.1 cloud.google.com/go/storage v1.10.0 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/Azure/go-autorest/autorest v0.11.24 github.com/Azure/go-autorest/autorest/adal v0.9.18 github.com/NYTimes/gziphandler v1.1.1 - github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 github.com/SAP/go-hdb v0.14.1 github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a github.com/aerospike/aerospike-client-go/v5 v5.6.0 - github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499 + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 - github.com/armon/go-metrics v0.4.0 + github.com/armon/go-metrics v0.3.10 github.com/armon/go-radix v1.0.0 - github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef - github.com/aws/aws-sdk-go v1.44.95 - github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a + github.com/aws/aws-sdk-go v1.37.19 github.com/cenkalti/backoff/v3 v3.2.2 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 github.com/client9/misspell v0.3.4 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/denisenkom/go-mssqldb v0.12.2 - 
github.com/docker/docker v20.10.18+incompatible + github.com/denisenkom/go-mssqldb v0.12.0 + github.com/docker/docker v20.10.10+incompatible github.com/docker/go-connections v0.4.0 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 github.com/dustin/go-humanize v1.0.0 @@ -50,39 +48,32 @@ require ( github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-errors/errors v1.4.1 github.com/go-ldap/ldap/v3 v3.4.1 - github.com/go-sql-driver/mysql v1.6.0 + github.com/go-sql-driver/mysql v1.5.0 github.com/go-test/deep v1.0.8 github.com/gocql/gocql v1.0.0 github.com/golang-jwt/jwt/v4 v4.3.0 github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.8 + github.com/google/go-cmp v0.5.7 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 - github.com/google/tink/go v1.6.1 - github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 + github.com/google/tink/go v1.4.0 + github.com/hashicorp/cap v0.2.1-0.20220502204956-9a9f4a9d6e61 github.com/hashicorp/consul-template v0.29.2 github.com/hashicorp/consul/api v1.14.0 github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 - github.com/hashicorp/go-gcp-common v0.8.0 - github.com/hashicorp/go-hclog v1.3.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 - github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 - github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 - github.com/hashicorp/go-memdb v1.3.3 + github.com/hashicorp/go-gcp-common v0.7.1-0.20220519220342-94aabf4c4c87 + github.com/hashicorp/go-hclog 
v1.2.2 + github.com/hashicorp/go-kms-wrapping v0.7.0 + github.com/hashicorp/go-memdb v1.3.2 github.com/hashicorp/go-msgpack v1.1.5 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.5 + github.com/hashicorp/go-plugin v1.4.4 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a - github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-retryablehttp v0.7.0 github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 + github.com/hashicorp/go-secure-stdlib/awsutil v0.1.5 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 @@ -94,53 +85,51 @@ require ( github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 - github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/go-uuid v1.0.2 + github.com/hashicorp/go-version v1.4.0 github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcl v1.0.1-vault-3 - github.com/hashicorp/hcp-sdk-go v0.22.0 github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 - github.com/hashicorp/raft v1.3.10 - github.com/hashicorp/raft-autopilot v0.1.6 + github.com/hashicorp/raft v1.3.9 + github.com/hashicorp/raft-autopilot v0.1.3 github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c github.com/hashicorp/raft-snapshot v1.0.4 - github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0 - github.com/hashicorp/vault-plugin-auth-azure v0.12.0 - github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 - github.com/hashicorp/vault-plugin-auth-cf v0.13.0 - github.com/hashicorp/vault-plugin-auth-gcp v0.14.0 - github.com/hashicorp/vault-plugin-auth-jwt v0.14.0 - github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 - github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0 - 
github.com/hashicorp/vault-plugin-auth-oci v0.12.0 - github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 - github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 - github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 - github.com/hashicorp/vault-plugin-database-redis v0.1.0 - github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 - github.com/hashicorp/vault-plugin-database-snowflake v0.6.0 + github.com/hashicorp/vault-plugin-auth-alicloud v0.12.0 + github.com/hashicorp/vault-plugin-auth-azure v0.11.0 + github.com/hashicorp/vault-plugin-auth-centrify v0.12.0 + github.com/hashicorp/vault-plugin-auth-cf v0.12.0 + github.com/hashicorp/vault-plugin-auth-gcp v0.13.2 + github.com/hashicorp/vault-plugin-auth-jwt v0.13.0 + github.com/hashicorp/vault-plugin-auth-kerberos v0.7.3 + github.com/hashicorp/vault-plugin-auth-kubernetes v0.13.2 + github.com/hashicorp/vault-plugin-auth-oci v0.11.0 + github.com/hashicorp/vault-plugin-database-couchbase v0.7.0 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.11.1 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.7.0 + github.com/hashicorp/vault-plugin-database-snowflake v0.5.1 github.com/hashicorp/vault-plugin-mock v0.16.1 - github.com/hashicorp/vault-plugin-secrets-ad v0.14.0 - github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 - github.com/hashicorp/vault-plugin-secrets-azure v0.14.0 - github.com/hashicorp/vault-plugin-secrets-gcp v0.14.0 - github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 - github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 - github.com/hashicorp/vault-plugin-secrets-kv v0.13.0 - github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 - github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0 - github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 + github.com/hashicorp/vault-plugin-secrets-ad v0.13.1 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.12.0 + github.com/hashicorp/vault-plugin-secrets-azure 
v0.13.0 + github.com/hashicorp/vault-plugin-secrets-gcp v0.13.1 + github.com/hashicorp/vault-plugin-secrets-gcpkms v0.12.0 + github.com/hashicorp/vault-plugin-secrets-kubernetes v0.1.1 + github.com/hashicorp/vault-plugin-secrets-kv v0.12.1 + github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.7.0 + github.com/hashicorp/vault-plugin-secrets-openldap v0.8.0 + github.com/hashicorp/vault-plugin-secrets-terraform v0.5.0 github.com/hashicorp/vault-testing-stepwise v0.1.2 - github.com/hashicorp/vault/api v1.8.0 + github.com/hashicorp/vault/api v1.7.2 github.com/hashicorp/vault/api/auth/approle v0.1.0 github.com/hashicorp/vault/api/auth/userpass v0.1.0 - github.com/hashicorp/vault/sdk v0.6.1-0.20220920194006-b5b65fe1ddb9 + github.com/hashicorp/vault/sdk v0.5.3-0.20220826205147-7caf353e8124 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/jackc/pgx/v4 v4.15.0 github.com/jcmturner/gokrb5/v8 v8.4.2 github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f github.com/jefferai/jsonx v1.0.0 github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f + github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.3.0 github.com/kr/text v0.2.0 github.com/mattn/go-colorable v0.1.12 @@ -153,6 +142,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/gox v1.0.1 github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/reflectwalk v1.0.2 github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc @@ -161,7 +151,7 @@ require ( github.com/okta/okta-sdk-golang/v2 v2.12.1 github.com/oracle/oci-go-sdk v13.1.0+incompatible github.com/ory/dockertest v3.3.5+incompatible - github.com/ory/dockertest/v3 v3.9.1 + github.com/ory/dockertest/v3 v3.8.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pires/go-proxyproto v0.6.1 github.com/pkg/errors v0.9.1 @@ -175,7 +165,7 @@ require ( 
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.7.1 - github.com/shirou/gopsutil/v3 v3.22.6 + github.com/shirou/gopsutil v3.21.5+incompatible github.com/stretchr/testify v1.8.0 go.etcd.io/bbolt v1.3.6 go.etcd.io/etcd/client/pkg/v3 v3.5.0 @@ -188,32 +178,28 @@ require ( go.opentelemetry.io/otel/trace v0.20.0 go.uber.org/atomic v1.9.0 go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 - golang.org/x/net v0.0.0-20220909164309-bea034e7d591 - golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 - golang.org/x/sys v0.0.0-20220913175220-63ea55921009 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 + golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 - golang.org/x/tools v0.1.10 - google.golang.org/api v0.83.0 - google.golang.org/grpc v1.47.0 + golang.org/x/tools v0.1.5 + google.golang.org/api v0.30.0 + google.golang.org/grpc v1.44.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/protobuf v1.27.1 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/square/go-jose.v2 v2.6.0 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 layeh.com/radius v0.0.0-20190322222518-890bc1058917 - mvdan.cc/gofumpt v0.3.1 + mvdan.cc/gofumpt v0.1.1 ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.6.1 // indirect - cloud.google.com/go/iam v0.3.0 // indirect - cloud.google.com/go/kms v1.4.0 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v61.4.0+incompatible // 
indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect @@ -230,11 +216,10 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect github.com/Microsoft/hcsshim v0.9.0 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/andybalholm/brotli v1.0.4 // indirect github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 // indirect github.com/aws/aws-sdk-go-v2 v1.8.0 // indirect @@ -249,30 +234,28 @@ require ( github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect - github.com/cloudflare/circl v1.1.0 // indirect github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect - github.com/containerd/cgroups v1.0.3 // indirect - github.com/containerd/containerd v1.5.13 // indirect - github.com/containerd/continuity v0.3.0 // indirect + github.com/containerd/cgroups v1.0.1 // indirect + github.com/containerd/containerd v1.5.7 // indirect + 
github.com/containerd/continuity v0.2.1 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect github.com/coreos/go-oidc/v3 v3.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/couchbase/gocb/v2 v2.3.3 // indirect github.com/couchbase/gocbcore/v10 v10.0.4 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // indirect - github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/digitalocean/godo v1.7.5 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v20.10.18+incompatible // indirect + github.com/docker/cli v20.10.9+incompatible // indirect github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect + github.com/docker/go-units v0.4.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect @@ -282,22 +265,11 @@ require ( github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 // indirect github.com/go-logr/logr v1.2.0 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.20.0 // indirect - github.com/go-openapi/errors v0.19.9 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/loads v0.20.2 // indirect - github.com/go-openapi/runtime v0.19.24 // indirect - github.com/go-openapi/spec v0.20.3 // indirect - github.com/go-openapi/strfmt v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect - github.com/go-openapi/validate v0.20.2 // indirect - 
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect - github.com/golang-sql/sqlexp v0.1.0 // indirect + github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -306,14 +278,14 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 // indirect + github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 // indirect github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect github.com/hashicorp/go-slug v0.7.0 // indirect github.com/hashicorp/go-tfe v0.20.0 // indirect @@ -341,56 +313,50 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.13.6 // indirect github.com/klauspost/pgzip v1.2.5 // 
indirect - github.com/lib/pq v1.10.6 // indirect + github.com/lib/pq v1.10.2 // indirect github.com/linode/linodego v0.7.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mediocregopher/radix/v4 v4.1.1 // indirect github.com/miekg/dns v1.1.41 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect + github.com/mitchellh/iochan v1.0.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.5.0 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect + github.com/moby/sys/mountinfo v0.4.1 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mongodb-forks/digest v1.0.3 // indirect github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect github.com/nwaples/rardecode v1.1.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runc v1.1.4 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.2 // indirect github.com/openlyinc/pointy v1.1.2 // indirect - github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect - github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.6.2 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect github.com/snowflakedb/gosnowflake v1.6.3 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect - github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.4.0 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect - github.com/tilinna/clock v1.0.2 // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - github.com/tklauser/numcpus v0.4.0 // indirect + github.com/tklauser/go-sysconf v0.3.9 // indirect + github.com/tklauser/numcpus v0.3.0 // indirect github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vmware/govmomi v0.18.0 // indirect @@ -403,21 +369,21 @@ require ( github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel/metric v0.20.0 // indirect - go.uber.org/multierr v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - 
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect + golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect + google.golang.org/genproto v0.0.0-20220207185906-7721543eae58 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/resty.v1 v1.12.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 32539790657a5..d1368358a1343 100644 --- a/go.sum +++ b/go.sum @@ -15,23 +15,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,21 +24,8 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= 
-cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/monitoring v1.2.0 h1:fEvQITrhVcPM6vuDQcgPMbU5kZFeQFwZmE7v6+S8BPo= -cloud.google.com/go/monitoring v1.2.0/go.mod h1:tE8I08OzjWmXLhCopnPaUDpfGOEJOonfWXGR9E9SsFo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -69,13 +41,15 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go 
v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v58.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v62.0.0+incompatible h1:8N2k27SYtc12qj5nTsuFMFJPZn5CGmgMWqTy4y9I7Jw= -github.com/Azure/azure-sdk-for-go v62.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v61.4.0+incompatible h1:BF2Pm3aQWIa6q9KmxyF1JYKYXtVw67vtvu2Wd54NGuY= +github.com/Azure/azure-sdk-for-go v61.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= @@ -88,6 +62,8 @@ github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= 
github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= @@ -96,6 +72,9 @@ github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMH github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= @@ -103,22 +82,27 @@ github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4Uw github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= 
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod 
h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= @@ -162,9 +146,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -183,43 +166,41 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 h1:NsReiLpErIPzRrnogAXYwSoU7txA977LjDGrbkewJbg= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/aerospike/aerospike-client-go/v5 v5.6.0 h1:tRxcUq0HY8fFPQEzF3EgrknF+w1xFO0YDfUb9Nm8yRI= github.com/aerospike/aerospike-client-go/v5 v5.6.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= -github.com/agnivade/levenshtein v1.0.1/go.mod 
h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= -github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499 h1:P2FUu1/xkj4abuHcqdRQO9ZAYc9hSWG5c5gifsU/Ogc= -github.com/aliyun/alibaba-cloud-sdk-go v1.61.1499/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= 
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 h1:ZsPrlYPY/v1PR7pGrmYD/rq5BFiSPalH8i9eEkSfnnI= github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -227,26 +208,21 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.25.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.95 h1:QwmA+PeR6v4pF0f/dPHVPWGAshAhb9TnGZBTM5uKuI8= -github.com/aws/aws-sdk-go v1.44.95/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.37.19 h1:/xKHoSsYfH9qe16pJAHIjqTVpMM2DRSsEt8Ok1bzYiw= +github.com/aws/aws-sdk-go v1.37.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= 
github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= @@ -273,8 +249,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQ github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY= -github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -302,24 +276,24 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= 
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY= github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -330,25 +304,20 @@ 
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 h1:k8q2Nsz7kNaUlysVCnWIFLMUSqiKXaGLdIf9P0GsX2Y= github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -370,15 +339,13 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1 github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod 
h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -392,21 +359,20 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= -github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= 
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= +github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -458,6 +424,7 @@ github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.0.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw= github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -487,24 +454,20 @@ github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw= -github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= +github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= +github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= github.com/denverdino/aliyungo 
v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= -github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= @@ -516,9 +479,8 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.9+incompatible h1:OJ7YkwQA+k2Oi51lmCojpjiygKpi76P7bg91b2eJxYU= github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU= -github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= @@ -527,9 +489,8 @@ 
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM= github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= -github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -538,9 +499,8 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6Uezg github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream 
v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -552,27 +512,32 @@ github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.11.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -583,10 +548,11 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoD github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -603,8 +569,6 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= 
github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= @@ -634,106 +598,21 @@ github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod 
h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= 
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= 
-github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod 
h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= -github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= -github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-sql-driver/mysql v1.5.0 
h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -772,12 +651,12 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -791,8 +670,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0 
h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoB github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= -github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -801,6 +680,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -808,7 +688,6 @@ github.com/golang/mock 
v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -830,6 +709,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -853,9 +733,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 
h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -864,15 +743,14 @@ github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJS github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -880,18 +758,11 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I= -github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY= +github.com/google/tink/go v1.4.0 
h1:7Ihv6n6/0zPrm2GRAeeF408P9Y00HXC2J6tvUzgb2sg= +github.com/google/tink/go v1.4.0/go.mod h1:OdW+ACSIXwGiPOWJiRTdoKzStsnqo8ZOsTzchWLy2DY= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -899,13 +770,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -917,8 +783,11 @@ github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= 
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= @@ -930,17 +799,21 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod 
h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 h1:Vgv3jG0kicczshK+lOHWJ9OososZjnjSu1YslqofFYY= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= +github.com/hashicorp/cap v0.0.0-20220502204956-9a9f4a9d6e61/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= +github.com/hashicorp/cap v0.1.1/go.mod h1:VfBvK2ULRyqsuqAnjgZl7HJ7/CGMC7ro4H5eXiZuun8= +github.com/hashicorp/cap v0.2.1-0.20220502204956-9a9f4a9d6e61 h1:FIf15EEnoIOSXXy58zl89s88iz0jFhWEPMbIGyRoOvo= +github.com/hashicorp/cap v0.2.1-0.20220502204956-9a9f4a9d6e61/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= github.com/hashicorp/consul-template v0.29.2 h1:LxhyfcrItxFvNLFuiidNg1623ZOkOHNs+BOaHmnUrdA= github.com/hashicorp/consul-template v0.29.2/go.mod h1:1bHIoTCGRTK3dv1qGUmTmct8dZCcywbMZdj6T3R4/j8= github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= @@ -948,6 +821,7 @@ github.com/hashicorp/consul/api v1.14.0/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/M github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod 
h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -961,40 +835,32 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo= github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= -github.com/hashicorp/go-gcp-common v0.8.0 h1:/2vGAbCU1v+BZ3YHXTCzTvxqma9WOJHYtADTfhZixLo= -github.com/hashicorp/go-gcp-common v0.8.0/go.mod h1:Q7zYRy9ue9SuaEN2s9YLIQs4SoKHdoRmKRcImY3SLgs= +github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= +github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= +github.com/hashicorp/go-gcp-common v0.7.1-0.20220519220342-94aabf4c4c87 h1:ZFwYpI67zQ2G73zrij1LXhuubprduZORoClazMJ/cb8= +github.com/hashicorp/go-gcp-common v0.7.1-0.20220519220342-94aabf4c4c87/go.mod h1:RuZi18562/z30wxOzpjeRrGcmk9Ro/rBzixaSZDhIhY= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= 
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do= -github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= +github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 h1:rOFDv+3k05mnW0oaDLffhVUwg03Csn0mvfO98Wdd2bE= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.5/go.mod h1:sDQAfwJGv25uGPZA04x87ERglCG6avnRcBT9wYoMII8= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 h1:ws2CPDuXMKwaBb2z/duBCdnB9pSxlN2nuDZWXcVj6RU= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4/go.mod h1:dDxt3GXi5QONVHYrJi2+EjsJLCUs59FktZQA8ZMnm+U= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1/go.mod 
h1:Sl/ffzV57UAyjtSg1h5Km0rN5+dtzZJm1CUztkoCW2c= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 h1:WxpTuafkDjdeeu0Xtk9y3m9YAJhfFMb8+y6eTnxvV8A= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1/go.mod h1:3D5UB9fjot4oUTYGQ5gGmhLJKreyLZeI0XB+NxcLTKs= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 h1:6joKpqCFveaNMEwC3qna67usws6DjdxqfCuQEHSM0aM= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1/go.mod h1:sDmsWR/W2LqwU217o32RzdHMb/FywGLF72PVIhpZ3hE= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 h1:+paf/3ompzaXe07BdxkV1vTnqvhwtmZPE4yQnMPTThI= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1/go.mod h1:YRtkersQ2N3iHlPDG5B3xBQtBsNZ3bjmlCwnrl26jVE= -github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 h1:FnWV2E0NLj+yYdhToUQjU81ayCMgURiL2WbJ0V7u/XY= -github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0/go.mod h1:17twrc0lM8IpfGqIv69WQvwgDiu3nRwWlk5YfCSQduY= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 h1:72zlIBTJd2pvYmINqotpvcI4ZXLxhRq2cVPTuqv0xqY= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1/go.mod h1:JytRAxdJViV+unUUWedb7uzEy5pgu7OurbqX0eHEikE= -github.com/hashicorp/go-memdb v1.3.3 h1:oGfEWrFuxtIUF3W2q/Jzt6G85TrMk9ey6XfYLvVe1Wo= -github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= +github.com/hashicorp/go-kms-wrapping v0.7.0 h1:UBagVJn4nSNOSjjtpkR370VOEBLnGMXfQcIlE/WL/7o= +github.com/hashicorp/go-kms-wrapping v0.7.0/go.mod h1:rmGmNzO/DIBzUyisFjeocXvazOlxgO5K8vsFQkUn7Hk= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V7g8= +github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= 
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= @@ -1004,20 +870,22 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= -github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp 
v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.2/go.mod h1:QRJZ7siKie+SZJB9jLbfKrs0Gd0yPWMtbneg0iU1PrY= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.5 h1:TkCWKqk1psjvUV7WktmZiRoZ1a9vw048AVnk/YbrzgY= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.5/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= @@ -1032,6 +900,7 @@ github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmk github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.2/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.4/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1 
h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= @@ -1054,22 +923,21 @@ github.com/hashicorp/go-tfe v0.20.0 h1:XUAhKoCX8ZUQfwBebC8hz7nkSSnqgNkaablIfxnZ0 github.com/hashicorp/go-tfe v0.20.0/go.mod h1:gyXLXbpBVxA2F/6opah8XBsOkZJxHYQmghl0OWi8keI= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
github.com/hashicorp/hcl v1.0.1-vault-3 h1:V95v5KSTu6DB5huDSKiq4uAfILEuNigK/+qPET6H/Mg= github.com/hashicorp/hcl v1.0.1-vault-3/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/hcp-sdk-go v0.22.0 h1:LWkLOkJFYWSojBM3IkwvYK6nrwrL+p4Fw8zEaoCQG10= -github.com/hashicorp/hcp-sdk-go v0.22.0/go.mod h1:mM3nYdVHuv2X2tv88MGVKRf/o2k3zF8jUZSMkwICQ28= github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0= github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= @@ -1086,10 +954,11 @@ github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcb github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw= -github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= -github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= -github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft v1.3.9 h1:9yuo1aR0bFTr1cw7pj3S2Bk6MhJCsnr2NAxvIBrP2x4= +github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft-autopilot v0.1.3 h1:Y+5jWKTFABJhCrpVwGpGjti2LzwQSzivoqd2wM6JWGw= +github.com/hashicorp/raft-autopilot v0.1.3/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod 
h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo= @@ -1097,58 +966,54 @@ github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBu github.com/hashicorp/raft-snapshot v1.0.4/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0 h1:qQTzTw5qrnQurdE5nuXebltQ0JLJprxm5X8Lg4tCfd4= -github.com/hashicorp/vault-plugin-auth-alicloud v0.13.0/go.mod h1:UO140aqMmOpWVfot9kpowLHhbbJ1alBJBjctIxKtpkY= -github.com/hashicorp/vault-plugin-auth-azure v0.12.0 h1:hhL5qOgIvzIYwROp6Sgg4KNBJPu6bbIdKtdIKpZp2tE= -github.com/hashicorp/vault-plugin-auth-azure v0.12.0/go.mod h1:r0Wlg9xgS7dOx/H2rgkVgXS77m289ON291g+/txFdv8= -github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 h1:IbtgJAY3EFyY+8n9A3QMn3MDGsvfQKDdH60r8G/C0nA= -github.com/hashicorp/vault-plugin-auth-centrify v0.13.0/go.mod h1:3fDbIVdwA/hkOVhwktKHDX5lo4DqIUUVbBdwQNNvxHw= -github.com/hashicorp/vault-plugin-auth-cf v0.13.0 h1:Iu4nRoZrkaLbW4vJ8t/wYS8z5BG4VQI7nKpBuwPTpOU= -github.com/hashicorp/vault-plugin-auth-cf v0.13.0/go.mod h1:Tktv1OXUjFobzjAU5qNJA8t1KC0109eu6Pcgm1uiwHg= -github.com/hashicorp/vault-plugin-auth-gcp v0.14.0 h1:9qjpx7fJcz7WoFiazIktaJSvuqscR2v/drc+euW8v2M= -github.com/hashicorp/vault-plugin-auth-gcp v0.14.0/go.mod h1:WNwaZN7NWy14xcy3otm1OXp5blcKgblUfvE16eYeUoQ= -github.com/hashicorp/vault-plugin-auth-jwt v0.14.0 h1:Wzg9qqAdEh1DQwsKf2ruggqaSbIdeTaZfDmO1Nn7YqA= -github.com/hashicorp/vault-plugin-auth-jwt v0.14.0/go.mod h1:oWM7Naj8lo4J9vJ23S0kpNW9pmeiHRiG/9ghLlPu6N0= -github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 
h1:5PiNahpVYFnQIg0Np3wLiFnfhHfnAHcWTl3VSzUVu/Y= -github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0/go.mod h1:eqjae8tMBpAWgJNk1NjV/vtJYXQRZnYudUkBFowz3bY= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0 h1:Hz/CcpNYfi99cUUMg5Tfx3uElKuvQ0wGGpy0L2bqAzk= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0/go.mod h1:rouq4XoBoCzXtECtxGCWHS++g6Nzw2HOms6p6N+Uzkw= -github.com/hashicorp/vault-plugin-auth-oci v0.12.0 h1:7Tuj5q+rwyPm1aS1rsLg2TRo2QIrPTz1qNHGDkUvz18= -github.com/hashicorp/vault-plugin-auth-oci v0.12.0/go.mod h1:oj2gh7qH2VzjelFeul8FzDmmYrJXnCuLUUeQAA6fMN8= -github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 h1:lDZ1OazKfSPIb1DXLbq7NCf1BZwB1cFN3OG3NedXB/s= -github.com/hashicorp/vault-plugin-database-couchbase v0.8.0/go.mod h1:skmG6MgIG6fjIOlOEgVKOcNlr1PcgHPUb9q1YQ5+Q9k= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 h1:g+jD35qUZlDcS2YWQBqXbfpMNBTvGEvRzSYjwLgWOK4= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 h1:wx/9Dh9YGGU7GiijwRfwPFBlWdmBEdf6n2VhgTdRtJU= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0/go.mod h1:eWwd1Ba7aLU1tIAtmFsEhu9E023jkkypHawxhnAbZfc= -github.com/hashicorp/vault-plugin-database-redis v0.1.0 h1:fDT32ZphGdvVdenvieWb+ZjWmCOHFtZ1Qjv581BloHw= -github.com/hashicorp/vault-plugin-database-redis v0.1.0/go.mod h1:bzrD2dQUClKcl89yYsaZqboFDEzst+TpXROWuhVxLEM= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 h1:qwDcp1vdlT0Io0x5YjtvhXtndfQB66jnDICg7NHxKQk= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0/go.mod h1:gB/SMtnIf0NdDyPSIo0KgSNp1ajTvLDiwP+lIAy8uHs= -github.com/hashicorp/vault-plugin-database-snowflake v0.6.0 h1:T13MXwD0xKA2vKGIdHdv17UOlm03zzR+q2MCLv+q/bM= -github.com/hashicorp/vault-plugin-database-snowflake v0.6.0/go.mod h1:QJ8IL/Qlu4Me1KkL0OpaWO7aMFL0TNoSEKVB5F+lCiM= 
+github.com/hashicorp/vault-plugin-auth-alicloud v0.12.0 h1:Uv6V7tLPO09CPcHg2NfhxNkSQqtS3EpFwgC753hjC14= +github.com/hashicorp/vault-plugin-auth-alicloud v0.12.0/go.mod h1:nHouIuEK5glNQLTvGZ55WGJlzOgrv8wNw5SJt6RjqtU= +github.com/hashicorp/vault-plugin-auth-azure v0.11.0 h1:PY2CuGj2RX4paj1kjX3VC0Q3HC/QvcsJ9uFa74tAIC4= +github.com/hashicorp/vault-plugin-auth-azure v0.11.0/go.mod h1:JkrWHfpu0pc6tQDBBAHAJ8oVqV70kUC5FubJIPwjKyA= +github.com/hashicorp/vault-plugin-auth-centrify v0.12.0 h1:d2dDZoUlSBNiw+jfi/2wxPoz6iNU7W5RasuwFc/deYo= +github.com/hashicorp/vault-plugin-auth-centrify v0.12.0/go.mod h1:3fDbIVdwA/hkOVhwktKHDX5lo4DqIUUVbBdwQNNvxHw= +github.com/hashicorp/vault-plugin-auth-cf v0.12.0 h1:8X3Zlc6P/ih0MVhrPJ54iwiCyMVIcLwfBVpONuYddks= +github.com/hashicorp/vault-plugin-auth-cf v0.12.0/go.mod h1:dr0cewrZ0SgpsPFkQ7Vf31J4xg+ylxM3Yv+dk1zWpEQ= +github.com/hashicorp/vault-plugin-auth-gcp v0.13.2 h1:rv8gBKYzFz9BD9pFRyrmfi46BuOh3K2uSS3AjkVVEvg= +github.com/hashicorp/vault-plugin-auth-gcp v0.13.2/go.mod h1:tHtTF/qQmrRrY5DEOxWxoW/y5Wk9VoHsBOC339RO3d8= +github.com/hashicorp/vault-plugin-auth-jwt v0.13.0 h1:BeMC4ZnP8iwRgL8vInEvCICA6e+iiDtkmOdNYKg3aGQ= +github.com/hashicorp/vault-plugin-auth-jwt v0.13.0/go.mod h1:+WL5kaq/0L5OROsA31X15U8yTIX4GTEv1rTLA9d15eo= +github.com/hashicorp/vault-plugin-auth-kerberos v0.7.3 h1:QumrPHn5n9iTaZScZwplqdnXoeMOrb3GJcwMweTmR3o= +github.com/hashicorp/vault-plugin-auth-kerberos v0.7.3/go.mod h1:eqjae8tMBpAWgJNk1NjV/vtJYXQRZnYudUkBFowz3bY= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.13.2 h1:rkYx4X4wH7DIo4zeXmC3UWF4uCmGdHv36jUy2xcy/R8= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.13.2/go.mod h1:/hQF30guXWLcIUiTYsXoQ0dUTHspo0q30nLBr1RE+Lw= +github.com/hashicorp/vault-plugin-auth-oci v0.11.0 h1:DrdccnGU8O28I1MIs21zmbYM2Nta7RLOAzozvDSX9h0= +github.com/hashicorp/vault-plugin-auth-oci v0.11.0/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw= +github.com/hashicorp/vault-plugin-database-couchbase v0.7.0 
h1:p//NCrYPF7AlCFtwKsO6dT7RvINSq3/x1VN19nfPlK4= +github.com/hashicorp/vault-plugin-database-couchbase v0.7.0/go.mod h1:Xw7uSxLWTzyWRHZhOBoc51cBC2nmNc7ekPgaaKK7UWI= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.11.1 h1:XU2b1wrvHNeQafypbJ0Xs6Zec0kN5VzMZ0AJzWXlI+4= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.11.1/go.mod h1:OMEQaNXsITksICGgkWW2y9/Nekv/cPKdqGOcMW5uUdI= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.7.0 h1:TAyYn8/rWn+OeeiYAqlACV4q7A5YYDv3vVqucHTJgxE= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.7.0/go.mod h1:e3HTaMD+aRWHBVctX/M39OaARQA8ux9TvdWDU0dJaoc= +github.com/hashicorp/vault-plugin-database-snowflake v0.5.1 h1:/arASm4g8nyZrL2DxDSWhhQ7RjTrveXHURL3dRIfHM0= +github.com/hashicorp/vault-plugin-database-snowflake v0.5.1/go.mod h1:v7EvYChgjpg6Q9NVnoz+5NyUGUfrYsksWtuWeyHX4A8= github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0= github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM= -github.com/hashicorp/vault-plugin-secrets-ad v0.14.0 h1:64qTDXSj3tw1li7lWh13OJoYIbJ/dp9F0kWdHo6vBiU= -github.com/hashicorp/vault-plugin-secrets-ad v0.14.0/go.mod h1:5XIn6cw1+gG+WWxK0SdEAKCDOXTp+MX90PzZ7f3Eks0= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 h1:eWDAAvZsKHhnXF8uCiyF/wDqT57fflCs54PTIolONBo= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0/go.mod h1:F4KWrlCQZbhP2dFXCkRvbHX2J6CTydlaY0cH+OrLHCE= -github.com/hashicorp/vault-plugin-secrets-azure v0.14.0 h1:1Az9rwdDHiTXiJSEYSq+Ar+MXeC3Z9v2ltZx3N+DwNY= -github.com/hashicorp/vault-plugin-secrets-azure v0.14.0/go.mod h1:Xw8CQPkyZSJRK9BXKBruf6kOO8rLyXEf40o19ClK9kY= -github.com/hashicorp/vault-plugin-secrets-gcp v0.14.0 h1:1yUxhYhFiMQm/miMYCtplnYly6HGBprOKStM6hpk+z0= -github.com/hashicorp/vault-plugin-secrets-gcp v0.14.0/go.mod h1:kRgZfXRD9qUHoGclaR09tKXXwkwNrkY+D76ahetsaB8= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 
h1:R36pNaaN4tJyIrPJej7/355Qt5+Q5XUTB+Az6rGs5xg= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0/go.mod h1:n2VKlYDCuO8+OXN4S1Im8esIL53/ENRFa4gXrvhCVIM= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 h1:iPue19f7LW63lAo8YFsm0jmo49gox0oIYFPAtVtnzGg= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0/go.mod h1:WO0wUxGh1PxhwdBHD7mXU5XQTqLwMZiJrUwVuzx3tIg= -github.com/hashicorp/vault-plugin-secrets-kv v0.13.0 h1:3Rf8RQIulyhaANaQxQdElfMh4SXS/z49thoSJpJ3ssg= -github.com/hashicorp/vault-plugin-secrets-kv v0.13.0/go.mod h1:9V2Ecim3m/qw+YAQelUeFADqZ1GVo8xwoLqfKsqh9pI= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 h1:VREm+cJGUXcPCakaYVxQt8wTVqTwJclsIIk2XuqpPbs= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0/go.mod h1:PLx2vxXukfsKsDRo/PlG4fxmJ1d+H2h82wT3vf4buuI= -github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0 h1:/6FQzNB4zjep7O14pkVOapwRJvnQ4gINGAc1Ss1IYg8= -github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0/go.mod h1:o7mF9tWgDkAD5OvvXWM3bOCqN+n/cCpaMm1CrEUZkHc= -github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 h1:N5s1ojXyG8gBZlx6BdqE04LviR0rw4vX1dDDMdnEzX8= -github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU= +github.com/hashicorp/vault-plugin-secrets-ad v0.13.1 h1:zxIaGsl8FI7B5GKJkXev56HSGowNAeUPy503auFE+Lg= +github.com/hashicorp/vault-plugin-secrets-ad v0.13.1/go.mod h1:5XIn6cw1+gG+WWxK0SdEAKCDOXTp+MX90PzZ7f3Eks0= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.12.0 h1:4Ke3dtM7ARa9ga2jI2rW/TouXWZ45hjfwwtcILoErA4= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.12.0/go.mod h1:F4KWrlCQZbhP2dFXCkRvbHX2J6CTydlaY0cH+OrLHCE= +github.com/hashicorp/vault-plugin-secrets-azure v0.13.0 h1:35JsvhKhvuATkP6vVQisA4prHd2gjzX4AT0CPvPXJ7I= +github.com/hashicorp/vault-plugin-secrets-azure v0.13.0/go.mod h1:Xw8CQPkyZSJRK9BXKBruf6kOO8rLyXEf40o19ClK9kY= +github.com/hashicorp/vault-plugin-secrets-gcp v0.13.1 
h1:6aUi1Y9jGBoWm58wsJnq8xerxAsxXjdeFsgUle1eIqw= +github.com/hashicorp/vault-plugin-secrets-gcp v0.13.1/go.mod h1:ndpmRkIPHW5UYqv2nn2AJNVZsucJ8lY2bp5i5Ngvhuc= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.12.0 h1:MXqB1waq3L18eUhTZ7ng14MbjOiAlwANgZCVUwoLBXo= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.12.0/go.mod h1:6DPwGu8oGR1sZRpjwkcAnrQZWQuAJ/Ph+rQHfUo1Yf4= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.1.1 h1:KiYpZpQv7X7Tm9wHUvboC2CyquwmjMhrPU2DvTcPC8o= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.1.1/go.mod h1:aF9rgE2pGvWpyS/ijVrd817aA4Sf1I+dpLaKgshAPyQ= +github.com/hashicorp/vault-plugin-secrets-kv v0.12.1 h1:Nef6kmnCQQRRdYzA52diUnx4r8HPwoh1oP9FCHB2hrg= +github.com/hashicorp/vault-plugin-secrets-kv v0.12.1/go.mod h1:9V2Ecim3m/qw+YAQelUeFADqZ1GVo8xwoLqfKsqh9pI= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.7.0 h1:EDyX/utLxEKGETeGAyWe4QNoKwIfCw6VpEzKLb8zudc= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.7.0/go.mod h1:PLx2vxXukfsKsDRo/PlG4fxmJ1d+H2h82wT3vf4buuI= +github.com/hashicorp/vault-plugin-secrets-openldap v0.8.0 h1:WJk5wRg861RlTd8xN6To/sRw3SnEUzqXpWml98GPZks= +github.com/hashicorp/vault-plugin-secrets-openldap v0.8.0/go.mod h1:XC7R76jZiuD50ENel+I1/Poz5phaEQg9d6Dko8DF3Ts= +github.com/hashicorp/vault-plugin-secrets-terraform v0.5.0 h1:NbQW1Z2+oIn8v4jjqLBbxDas0Uw0bzV74da4BQsdRow= +github.com/hashicorp/vault-plugin-secrets-terraform v0.5.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU= github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE= github.com/hashicorp/vault-testing-stepwise v0.1.2 h1:3obC/ziAPGnsz2IQxr5e4Ayb7tu7WL6pm6mmZ5gwhhs= github.com/hashicorp/vault-testing-stepwise v0.1.2/go.mod h1:TeU6B+5NqxUjto+Zey+QQEH1iywuHn0ciHZNYh4q3uI= @@ -1159,10 +1024,11 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 
h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huaweicloud/golangsdk v0.0.0-20200304081349-45ec0797f2a4/go.mod h1:WQBcHRNX9shz3928lWEvstQJtAtYI7ks6XlgtRT9Tcw= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -1172,10 +1038,10 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= 
+github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -1198,6 +1064,7 @@ github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5W github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= @@ -1229,6 +1096,7 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.0.7 h1:d1a2VFpSdm5gtjhCPWsQHSnx8+5V3ms5431YwvmkuNk= +github.com/jarcoal/httpmock v1.0.7/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -1261,12 +1129,11 @@ github.com/jmespath/go-jmespath/internal/testify 
v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1277,6 +1144,7 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -1286,12 +1154,13 @@ github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaR github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo= +github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -1323,24 +1192,16 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2 
h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod 
h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= @@ -1350,6 +1211,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -1373,8 +1235,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mediocregopher/radix/v4 v4.1.1 h1:JkZBEp0y8pWGNZkmO3RR5oEO5huwd4zKKt4rh1C+P8s= -github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk= @@ -1398,19 +1258,22 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= @@ -1424,15 +1287,13 @@ github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0Gq github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1476,6 +1337,7 @@ github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1488,6 +1350,7 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1505,17 +1368,15 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= -github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1526,19 +1387,17 @@ github.com/opencontainers/runtime-tools 
v0.0.0-20181011054405-1d69bd0f9c39/go.mo github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY= github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/oracle/oci-go-sdk v13.1.0+incompatible h1:inwbT0b/mMbnTfzYoW2xcU1cCMIlU6Fz973at5phRXM= github.com/oracle/oci-go-sdk v13.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= -github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM= -github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.8.0 h1:i5b0cJCd801qw0cVQUOH6dSpI9fT3j5tdWu0jKu90ks= github.com/ory/dockertest/v3 v3.8.0/go.mod h1:9zPATATlWQru+ynXP+DytBQrsXV7Tmlx7K86H6fQaDo= -github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool 
v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= @@ -1546,16 +1405,17 @@ github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otz github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1568,21 +1428,16 @@ github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod 
h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= @@ -1591,6 +1446,7 @@ github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNi github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= @@ -1601,6 +1457,7 @@ github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= @@ -1608,6 +1465,7 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= @@ -1618,6 +1476,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1631,6 +1490,7 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1639,8 +1499,8 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= @@ -1665,12 +1525,10 @@ 
github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U= github.com/sethvargo/go-limiter v0.7.1/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= -github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ= -github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= +github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1683,9 +1541,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1696,8 +1553,6 @@ github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQb github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= -github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1735,7 +1590,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1746,12 +1600,10 @@ github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6 github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tilinna/clock v1.0.2 h1:6BO2tyAC9JbPExKH/z9zl44FLu1lImh3nDNKA0kgrkI= -github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -1767,7 +1619,6 @@ 
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1784,8 +1635,6 @@ github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1798,6 +1647,8 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yandex-cloud/go-genproto v0.0.0-20200722140432-762fe965ce77/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= +github.com/yandex-cloud/go-sdk v0.0.0-20200722140627-2194e5077f13/go.mod h1:LEdAMqa1v/7KYe4b13ALLkonuDxLph57ibUb50ctvJk= github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -1809,8 +1660,6 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1820,6 +1669,7 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 
h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -1832,22 +1682,16 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.mongodb.org/atlas v0.13.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= go.mongodb.org/atlas v0.15.0 h1:YyOBdBIuI//krRITf4r7PSirJ3YDNNUfNmapxwSyDow= go.mongodb.org/atlas v0.15.0/go.mod h1:lQhRHIxc6jQHEK3/q9WLu/SdBkPj2fQYhjLGUF6Z3U8= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= @@ -1875,9 +1719,8 @@ go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1892,18 +1735,18 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191119213627-4f8c1d86b1ba/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1922,8 +1765,11 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 h1:GIAS/yBem/gq2MUqgNIzUHW7cJMmx3TGZOrnyYaNQ6c= -golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1936,7 +1782,9 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1958,25 +1806,24 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1998,6 +1845,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -2011,12 +1859,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -2024,6 +1868,7 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -2034,36 +1879,19 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net 
v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw= -golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2075,9 +1903,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c 
h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2086,13 +1913,15 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2159,7 +1988,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2170,14 +1998,10 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2192,33 +2016,21 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220207234003-57398862261d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw= -golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2229,6 +2041,7 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -2243,14 +2056,16 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2267,7 +2082,6 @@ golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2311,32 +2125,27 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= -golang.org/x/xerrors 
v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -2354,34 +2163,13 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod 
h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.83.0 h1:pMvST+6v+46Gabac4zlJlalxZjCeRcepwg2EdBU+nCc= -google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine 
v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -2390,11 +2178,13 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -2412,6 +2202,7 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200323114720-3f67cca34472/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200416231807-8751e049a2a0/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2425,56 +2216,20 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto 
v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM= -google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207185906-7721543eae58 h1:i67FGOy2/zGfhE3YgHdrOrcFbOBhqdcRoBrsDqSQrOI= +google.golang.org/genproto v0.0.0-20220207185906-7721543eae58/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2492,27 +2247,16 @@ google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2527,10 +2271,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2546,12 +2288,12 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod 
h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= @@ -2562,6 +2304,7 @@ gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKek gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= @@ -2579,7 +2322,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2588,8 +2330,10 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2597,12 +2341,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools 
v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -2631,7 +2377,9 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -2639,6 +2387,7 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod 
h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= @@ -2651,8 +2400,8 @@ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19V k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 9bd5c7c0289c9..3157159162c08 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -13,8 +13,6 @@ import ( dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" dbMongoAtlas 
"github.com/hashicorp/vault-plugin-database-mongodbatlas" - dbRedis "github.com/hashicorp/vault-plugin-database-redis" - dbRedisElastiCache "github.com/hashicorp/vault-plugin-database-redis-elasticache" dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake" logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" @@ -24,7 +22,7 @@ import ( logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" - logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" + logicalOpenLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" @@ -70,114 +68,74 @@ var addExternalPlugins = addExtPluginsImpl // the plugin's New() func. type BuiltinFactory func() (interface{}, error) -// There are three forms of Backends which exist in the BuiltinRegistry. 
-type credentialBackend struct { - logical.Factory - consts.DeprecationStatus -} - -type databasePlugin struct { - Factory BuiltinFactory - consts.DeprecationStatus -} - -type logicalBackend struct { - logical.Factory - consts.DeprecationStatus -} - func newRegistry() *registry { reg := ®istry{ - credentialBackends: map[string]credentialBackend{ - "alicloud": {Factory: credAliCloud.Factory}, - "app-id": { - Factory: credAppId.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "approle": {Factory: credAppRole.Factory}, - "aws": {Factory: credAws.Factory}, - "azure": {Factory: credAzure.Factory}, - "centrify": {Factory: credCentrify.Factory}, - "cert": {Factory: credCert.Factory}, - "cf": {Factory: credCF.Factory}, - "gcp": {Factory: credGcp.Factory}, - "github": {Factory: credGitHub.Factory}, - "jwt": {Factory: credJWT.Factory}, - "kerberos": {Factory: credKerb.Factory}, - "kubernetes": {Factory: credKube.Factory}, - "ldap": {Factory: credLdap.Factory}, - "oci": {Factory: credOCI.Factory}, - "oidc": {Factory: credJWT.Factory}, - "okta": {Factory: credOkta.Factory}, - "pcf": { - Factory: credCF.Factory, - DeprecationStatus: consts.Deprecated, - }, - "radius": {Factory: credRadius.Factory}, - "userpass": {Factory: credUserpass.Factory}, + credentialBackends: map[string]logical.Factory{ + "alicloud": credAliCloud.Factory, + "app-id": credAppId.Factory, + "approle": credAppRole.Factory, + "aws": credAws.Factory, + "azure": credAzure.Factory, + "centrify": credCentrify.Factory, + "cert": credCert.Factory, + "cf": credCF.Factory, + "gcp": credGcp.Factory, + "github": credGitHub.Factory, + "jwt": credJWT.Factory, + "kerberos": credKerb.Factory, + "kubernetes": credKube.Factory, + "ldap": credLdap.Factory, + "oci": credOCI.Factory, + "oidc": credJWT.Factory, + "okta": credOkta.Factory, + "pcf": credCF.Factory, // Deprecated. 
+ "radius": credRadius.Factory, + "userpass": credUserpass.Factory, }, - databasePlugins: map[string]databasePlugin{ + databasePlugins: map[string]BuiltinFactory{ // These four plugins all use the same mysql implementation but with // different username settings passed by the constructor. - "mysql-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultUserNameTemplate)}, - "mysql-aurora-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - "mysql-rds-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - "mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - - "cassandra-database-plugin": {Factory: dbCass.New}, - "couchbase-database-plugin": {Factory: dbCouchbase.New}, - "elasticsearch-database-plugin": {Factory: dbElastic.New}, - "hana-database-plugin": {Factory: dbHana.New}, - "influxdb-database-plugin": {Factory: dbInflux.New}, - "mongodb-database-plugin": {Factory: dbMongo.New}, - "mongodbatlas-database-plugin": {Factory: dbMongoAtlas.New}, - "mssql-database-plugin": {Factory: dbMssql.New}, - "postgresql-database-plugin": {Factory: dbPostgres.New}, - "redshift-database-plugin": {Factory: dbRedshift.New}, - "redis-database-plugin": {Factory: dbRedis.New}, - "redis-elasticache-database-plugin": {Factory: dbRedisElastiCache.New}, - "snowflake-database-plugin": {Factory: dbSnowflake.New}, + "mysql-database-plugin": dbMysql.New(dbMysql.DefaultUserNameTemplate), + "mysql-aurora-database-plugin": dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate), + "mysql-rds-database-plugin": dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate), + "mysql-legacy-database-plugin": dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate), + + "cassandra-database-plugin": dbCass.New, + "couchbase-database-plugin": dbCouchbase.New, + "elasticsearch-database-plugin": dbElastic.New, + "hana-database-plugin": dbHana.New, + "influxdb-database-plugin": dbInflux.New, + "mongodb-database-plugin": 
dbMongo.New, + "mongodbatlas-database-plugin": dbMongoAtlas.New, + "mssql-database-plugin": dbMssql.New, + "postgresql-database-plugin": dbPostgres.New, + "redshift-database-plugin": dbRedshift.New, + "snowflake-database-plugin": dbSnowflake.New, }, - logicalBackends: map[string]logicalBackend{ - "ad": {Factory: logicalAd.Factory}, - "alicloud": {Factory: logicalAlicloud.Factory}, - "aws": {Factory: logicalAws.Factory}, - "azure": {Factory: logicalAzure.Factory}, - "cassandra": { - Factory: logicalCass.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "consul": {Factory: logicalConsul.Factory}, - "gcp": {Factory: logicalGcp.Factory}, - "gcpkms": {Factory: logicalGcpKms.Factory}, - "kubernetes": {Factory: logicalKube.Factory}, - "kv": {Factory: logicalKv.Factory}, - "mongodb": { - Factory: logicalMongo.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "mongodbatlas": {Factory: logicalMongoAtlas.Factory}, - "mssql": { - Factory: logicalMssql.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "mysql": { - Factory: logicalMysql.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "nomad": {Factory: logicalNomad.Factory}, - "openldap": {Factory: logicalLDAP.Factory}, - "ldap": {Factory: logicalLDAP.Factory}, - "pki": {Factory: logicalPki.Factory}, - "postgresql": { - Factory: logicalPostgres.Factory, - DeprecationStatus: consts.PendingRemoval, - }, - "rabbitmq": {Factory: logicalRabbit.Factory}, - "ssh": {Factory: logicalSsh.Factory}, - "terraform": {Factory: logicalTerraform.Factory}, - "totp": {Factory: logicalTotp.Factory}, - "transit": {Factory: logicalTransit.Factory}, + logicalBackends: map[string]logical.Factory{ + "ad": logicalAd.Factory, + "alicloud": logicalAlicloud.Factory, + "aws": logicalAws.Factory, + "azure": logicalAzure.Factory, + "cassandra": logicalCass.Factory, // Deprecated + "consul": logicalConsul.Factory, + "gcp": logicalGcp.Factory, + "gcpkms": logicalGcpKms.Factory, + "kubernetes": logicalKube.Factory, + 
"kv": logicalKv.Factory, + "mongodb": logicalMongo.Factory, // Deprecated + "mongodbatlas": logicalMongoAtlas.Factory, + "mssql": logicalMssql.Factory, // Deprecated + "mysql": logicalMysql.Factory, // Deprecated + "nomad": logicalNomad.Factory, + "openldap": logicalOpenLDAP.Factory, + "pki": logicalPki.Factory, + "postgresql": logicalPostgres.Factory, // Deprecated + "rabbitmq": logicalRabbit.Factory, + "ssh": logicalSsh.Factory, + "terraform": logicalTerraform.Factory, + "totp": logicalTotp.Factory, + "transit": logicalTransit.Factory, }, } @@ -189,9 +147,9 @@ func newRegistry() *registry { func addExtPluginsImpl(r *registry) {} type registry struct { - credentialBackends map[string]credentialBackend - databasePlugins map[string]databasePlugin - logicalBackends map[string]logicalBackend + credentialBackends map[string]logical.Factory + databasePlugins map[string]BuiltinFactory + logicalBackends map[string]logical.Factory } // Get returns the Factory func for a particular backend plugin from the @@ -199,22 +157,17 @@ type registry struct { func (r *registry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) { switch pluginType { case consts.PluginTypeCredential: - if f, ok := r.credentialBackends[name]; ok { - return toFunc(f.Factory), ok - } + f, ok := r.credentialBackends[name] + return toFunc(f), ok case consts.PluginTypeSecrets: - if f, ok := r.logicalBackends[name]; ok { - return toFunc(f.Factory), ok - } + f, ok := r.logicalBackends[name] + return toFunc(f), ok case consts.PluginTypeDatabase: - if f, ok := r.databasePlugins[name]; ok { - return f.Factory, ok - } + f, ok := r.databasePlugins[name] + return f, ok default: return nil, false } - - return nil, false } // Keys returns the list of plugin names that are considered builtin plugins. 
@@ -246,28 +199,6 @@ func (r *registry) Contains(name string, pluginType consts.PluginType) bool { return false } -// DeprecationStatus returns the Deprecation status for a builtin with type `pluginType` -func (r *registry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) { - switch pluginType { - case consts.PluginTypeCredential: - if f, ok := r.credentialBackends[name]; ok { - return f.DeprecationStatus, ok - } - case consts.PluginTypeSecrets: - if f, ok := r.logicalBackends[name]; ok { - return f.DeprecationStatus, ok - } - case consts.PluginTypeDatabase: - if f, ok := r.databasePlugins[name]; ok { - return f.DeprecationStatus, ok - } - default: - return consts.Unknown, false - } - - return consts.Unknown, false -} - func toFunc(ifc interface{}) func() (interface{}, error) { return func() (interface{}, error) { return ifc, nil diff --git a/helper/builtinplugins/registry_test.go b/helper/builtinplugins/registry_test.go deleted file mode 100644 index 5e63ba3e701f3..0000000000000 --- a/helper/builtinplugins/registry_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package builtinplugins - -import ( - "reflect" - "testing" - - credAppId "github.com/hashicorp/vault/builtin/credential/app-id" - dbMysql "github.com/hashicorp/vault/plugins/database/mysql" - "github.com/hashicorp/vault/sdk/helper/consts" -) - -// Test_RegistryGet exercises the (registry).Get functionality by comparing -// factory types and ok response. 
-func Test_RegistryGet(t *testing.T) { - tests := []struct { - name string - builtin string - pluginType consts.PluginType - want BuiltinFactory - wantOk bool - }{ - { - name: "non-existent builtin", - builtin: "foo", - pluginType: consts.PluginTypeCredential, - want: nil, - wantOk: false, - }, - { - name: "bad plugin type", - builtin: "app-id", - pluginType: 9000, - want: nil, - wantOk: false, - }, - { - name: "known builtin lookup", - builtin: "app-id", - pluginType: consts.PluginTypeCredential, - want: toFunc(credAppId.Factory), - wantOk: true, - }, - { - name: "known builtin lookup", - builtin: "mysql-database-plugin", - pluginType: consts.PluginTypeDatabase, - want: dbMysql.New(dbMysql.DefaultUserNameTemplate), - wantOk: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var got BuiltinFactory - got, ok := Registry.Get(tt.builtin, tt.pluginType) - if ok { - if reflect.TypeOf(got) != reflect.TypeOf(tt.want) { - t.Fatalf("got type: %T, want type: %T", got, tt.want) - } - } - if tt.wantOk != ok { - t.Fatalf("error: got %v, want %v", ok, tt.wantOk) - } - }) - } -} - -// Test_RegistryKeyCounts is a light unit test used to check the builtin -// registry lists for each plugin type and make sure they match in length. 
-func Test_RegistryKeyCounts(t *testing.T) { - tests := []struct { - name string - pluginType consts.PluginType - want int // use slice length as test condition - wantOk bool - }{ - { - name: "bad plugin type", - pluginType: 9001, - want: 0, - }, - { - name: "number of auth plugins", - pluginType: consts.PluginTypeCredential, - want: 20, - }, - { - name: "number of database plugins", - pluginType: consts.PluginTypeDatabase, - want: 17, - }, - { - name: "number of secrets plugins", - pluginType: consts.PluginTypeSecrets, - want: 24, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - keys := Registry.Keys(tt.pluginType) - if len(keys) != tt.want { - t.Fatalf("got size: %d, want size: %d", len(keys), tt.want) - } - }) - } -} - -// Test_RegistryContains exercises the (registry).Contains functionality. -func Test_RegistryContains(t *testing.T) { - tests := []struct { - name string - builtin string - pluginType consts.PluginType - want bool - }{ - { - name: "non-existent builtin", - builtin: "foo", - pluginType: consts.PluginTypeCredential, - want: false, - }, - { - name: "bad plugin type", - builtin: "app-id", - pluginType: 9001, - want: false, - }, - { - name: "known builtin lookup", - builtin: "app-id", - pluginType: consts.PluginTypeCredential, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := Registry.Contains(tt.builtin, tt.pluginType) - if got != tt.want { - t.Fatalf("error: got %v, wanted %v", got, tt.want) - } - }) - } -} - -// Test_RegistryStatus exercises the (registry).Status functionality. 
-func Test_RegistryStatus(t *testing.T) { - tests := []struct { - name string - builtin string - pluginType consts.PluginType - want consts.DeprecationStatus - wantOk bool - }{ - { - name: "non-existent builtin and valid type", - builtin: "foo", - pluginType: consts.PluginTypeCredential, - want: consts.Unknown, - wantOk: false, - }, - { - name: "mismatch builtin and plugin type", - builtin: "app-id", - pluginType: consts.PluginTypeSecrets, - want: consts.Unknown, - wantOk: false, - }, - { - name: "existing builtin and invalid plugin type", - builtin: "app-id", - pluginType: 9000, - want: consts.Unknown, - wantOk: false, - }, - { - name: "supported builtin lookup", - builtin: "approle", - pluginType: consts.PluginTypeCredential, - want: consts.Supported, - wantOk: true, - }, - { - name: "deprecated builtin lookup", - builtin: "pcf", - pluginType: consts.PluginTypeCredential, - want: consts.Deprecated, - wantOk: true, - }, - { - name: "pending removal builtin lookup", - builtin: "app-id", - pluginType: consts.PluginTypeCredential, - want: consts.PendingRemoval, - wantOk: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, ok := Registry.DeprecationStatus(tt.builtin, tt.pluginType) - if got != tt.want { - t.Fatalf("got %+v, wanted %+v", got, tt.want) - } - if ok != tt.wantOk { - t.Fatalf("got ok: %t, want ok: %t", ok, tt.wantOk) - } - }) - } -} diff --git a/helper/constants/fips_build_check.go b/helper/constants/fips_build_check.go index 1e865b499f62e..aee3d0edba690 100644 --- a/helper/constants/fips_build_check.go +++ b/helper/constants/fips_build_check.go @@ -8,17 +8,17 @@ import "C" // without implementation in any imported or built library. 
This results in // a linker err if the above build constraints are satisfied: // -// /home/cipherboy/GitHub/cipherboy/vault-enterprise/helper/constants/fips_build_check.go:10: undefined reference to `github.com/hashicorp/vault/helper/constants.VaultFIPSBuildRequiresVersionAgnosticTagAndOneVersionTag' +// /home/cipherboy/GitHub/cipherboy/vault-enterprise/helper/constants/fips_build_check.go:10: undefined reference to `github.com/hashicorp/vault/helper/constants.VaultFIPSBuildRequiresVersionAgnosticTagAndOneVersionTag' // // This indicates that a build error has occurred due to mismatched tags. // // In particular, we use this to enforce the following restrictions on build // tags: // -// - If a versioned fips_140_* tag is specified, the unversioned tag must -// also be. -// - If the unversioned tag is specified, a versioned tag must be. -// - Both versioned flags cannot be specified at the same time. +// - If a versioned fips_140_* tag is specified, the unversioned tag must +// also be. +// - If the unversioned tag is specified, a versioned tag must be. +// - Both versioned flags cannot be specified at the same time. // // In the unlikely event that a FFI implementation for this function exists // in the future, it should be renamed to a new function which does not diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index c610a3e3e4023..3a036f4726aaf 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: helper/forwarding/types.proto package forwarding @@ -27,7 +27,7 @@ type Request struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -127,12 +127,12 @@ type URL struct { Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` // This isn't needed now but might be in the future, so we'll skip the // number to keep the ordering in net/url - // UserInfo user = 3; + //UserInfo user = 3; Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` // This also isn't needed right now, but we'll reserve the number - // bool force_query = 7; + //bool force_query = 7; RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` } @@ -272,7 +272,7 @@ type Response struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` // Added in 
0.6.2 to ensure that the content-type is set appropriately, as diff --git a/helper/hostutil/hostinfo.go b/helper/hostutil/hostinfo.go index d35afb57d900a..ed8e648f6e628 100644 --- a/helper/hostutil/hostinfo.go +++ b/helper/hostutil/hostinfo.go @@ -8,10 +8,10 @@ import ( "time" "github.com/hashicorp/go-multierror" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/disk" - "github.com/shirou/gopsutil/v3/host" - "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/mem" ) // HostInfo holds all the information that gets captured on the host. The @@ -28,10 +28,10 @@ type HostInfo struct { Disk []*disk.UsageStat `json:"disk"` // Host returns general host information such as hostname, platform, uptime, // kernel version, etc. - Host *HostInfoStat `json:"host"` + Host *host.InfoStat `json:"host"` // Memory contains statistics about the memory such as total, available, and // used memory in number of bytes. 
- Memory *VirtualMemoryStat `json:"memory"` + Memory *mem.VirtualMemoryStat `json:"memory"` } // CollectHostInfo returns information on the host, which includes general @@ -44,13 +44,13 @@ func CollectHostInfo(ctx context.Context) (*HostInfo, error) { var retErr *multierror.Error info := &HostInfo{Timestamp: time.Now().UTC()} - if h, err := CollectHostInfoStat(ctx); err != nil { + if h, err := host.InfoWithContext(ctx); err != nil { retErr = multierror.Append(retErr, &HostInfoError{"host", err}) } else { info.Host = h } - if v, err := CollectHostMemory(ctx); err != nil { + if v, err := mem.VirtualMemoryWithContext(ctx); err != nil { retErr = multierror.Append(retErr, &HostInfoError{"memory", err}) } else { info.Memory = v @@ -96,63 +96,9 @@ func CollectHostMemory(ctx context.Context) (*VirtualMemoryStat, error) { } return &VirtualMemoryStat{ - Total: m.Total, - Available: m.Available, - Used: m.Used, - UsedPercent: m.UsedPercent, - Free: m.Free, - Active: m.Active, - Inactive: m.Inactive, - Wired: m.Wired, - Laundry: m.Laundry, - Buffers: m.Buffers, - Cached: m.Cached, - Writeback: m.WriteBack, - Dirty: m.Dirty, - WritebackTmp: m.WriteBackTmp, - Shared: m.Shared, - Slab: m.Slab, - SReclaimable: m.Sreclaimable, - SUnreclaim: m.Sunreclaim, - PageTables: m.PageTables, - SwapCached: m.SwapCached, - CommitLimit: m.CommitLimit, - CommittedAS: m.CommittedAS, - HighTotal: m.HighTotal, - HighFree: m.HighFree, - LowTotal: m.LowTotal, - LowFree: m.LowFree, - SwapTotal: m.SwapTotal, - SwapFree: m.SwapFree, - Mapped: m.Mapped, - VMallocTotal: m.VmallocTotal, - VMallocUsed: m.VmallocUsed, - VMallocChunk: m.VmallocChunk, - HugePagesTotal: m.HugePagesTotal, - HugePagesFree: m.HugePagesFree, - HugePageSize: m.HugePageSize, - }, nil -} - -func CollectHostInfoStat(ctx context.Context) (*HostInfoStat, error) { - h, err := host.InfoWithContext(ctx) - if err != nil { - return nil, err - } - - return &HostInfoStat{ - Hostname: h.Hostname, - Uptime: h.Uptime, - BootTime: h.BootTime, - 
Procs: h.Procs, - OS: h.OS, - Platform: h.Platform, - PlatformFamily: h.PlatformFamily, - PlatformVersion: h.PlatformVersion, - KernelVersion: h.KernelVersion, - KernelArch: h.KernelArch, - VirtualizationSystem: h.VirtualizationSystem, - VirtualizationRole: h.VirtualizationRole, - HostID: h.HostID, + Total: m.Total, + Available: m.Available, + Used: m.Used, + UsedPercent: m.UsedPercent, }, nil } diff --git a/helper/hostutil/hostinfo_util.go b/helper/hostutil/hostinfo_util.go index 8811746fd8178..114f596f9abab 100644 --- a/helper/hostutil/hostinfo_util.go +++ b/helper/hostutil/hostinfo_util.go @@ -1,124 +1,24 @@ -// Copyright (c) 2014, WAKAYAMA Shirou -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of the gopsutil authors nor the names of its contributors -// may be used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Copied from https://github.com/shirou/gopsutil/blob/b49f37e9f30f49530cf2ad6038a4dac1b746c8f7/mem/mem.go#L15 -// Copied from https://github.com/shirou/gopsutil/blob/b49f37e9f30f49530cf2ad6038a4dac1b746c8f7/host/host.go#L17 - package hostutil -// VirtualMemoryStat holds commonly used memory measurements. We must have a +// VirutalMemoryStat holds commonly used memory measurements. We must have a // local type here in order to avoid building the gopsutil library on certain // arch types. -// -// This struct is copied to maintain backwards compatibility in the Vault host-info API. -// This is done because gopsutil changed JSON struct tags between its v2 and v3 releases. -// For details see https://github.com/shirou/gopsutil/tree/master/_tools/v3migration. type VirtualMemoryStat struct { // Total amount of RAM on this system - Total uint64 `json:"total"` + Total uint64 // RAM available for programs to allocate // // This value is computed from the kernel specific values. - Available uint64 `json:"available"` + Available uint64 // RAM used by programs // // This value is computed from the kernel specific values. - Used uint64 `json:"used"` + Used uint64 // Percentage of RAM used by programs // // This value is computed from the kernel specific values. - UsedPercent float64 `json:"usedPercent"` - - // This is the kernel's notion of free memory; RAM chips whose bits nobody - // cares about the value of right now. 
For a human consumable number, - // Available is what you really want. - Free uint64 `json:"free"` - - // OS X / BSD specific numbers: - // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/ - Active uint64 `json:"active"` - Inactive uint64 `json:"inactive"` - Wired uint64 `json:"wired"` - - // FreeBSD specific numbers: - // https://reviews.freebsd.org/D8467 - Laundry uint64 `json:"laundry"` - - // Linux specific numbers - // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html - // https://www.kernel.org/doc/Documentation/filesystems/proc.txt - // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting - Buffers uint64 `json:"buffers"` - Cached uint64 `json:"cached"` - Writeback uint64 `json:"writeback"` - Dirty uint64 `json:"dirty"` - WritebackTmp uint64 `json:"writebacktmp"` - Shared uint64 `json:"shared"` - Slab uint64 `json:"slab"` - SReclaimable uint64 `json:"sreclaimable"` - SUnreclaim uint64 `json:"sunreclaim"` - PageTables uint64 `json:"pagetables"` - SwapCached uint64 `json:"swapcached"` - CommitLimit uint64 `json:"commitlimit"` - CommittedAS uint64 `json:"committedas"` - HighTotal uint64 `json:"hightotal"` - HighFree uint64 `json:"highfree"` - LowTotal uint64 `json:"lowtotal"` - LowFree uint64 `json:"lowfree"` - SwapTotal uint64 `json:"swaptotal"` - SwapFree uint64 `json:"swapfree"` - Mapped uint64 `json:"mapped"` - VMallocTotal uint64 `json:"vmalloctotal"` - VMallocUsed uint64 `json:"vmallocused"` - VMallocChunk uint64 `json:"vmallocchunk"` - HugePagesTotal uint64 `json:"hugepagestotal"` - HugePagesFree uint64 `json:"hugepagesfree"` - HugePageSize uint64 `json:"hugepagesize"` -} - -// HostInfoStat describes the host status. -// -// This struct is copied to maintain backwards compatibility in the Vault host-info API. -// This is done because gopsutil changed JSON struct tags between its v2 and v3 releases. 
-// For details see https://github.com/shirou/gopsutil/tree/master/_tools/v3migration. -type HostInfoStat struct { - Hostname string `json:"hostname"` - Uptime uint64 `json:"uptime"` - BootTime uint64 `json:"bootTime"` - Procs uint64 `json:"procs"` - OS string `json:"os"` - Platform string `json:"platform"` - PlatformFamily string `json:"platformFamily"` - PlatformVersion string `json:"platformVersion"` - KernelVersion string `json:"kernelVersion"` - KernelArch string `json:"kernelArch"` - VirtualizationSystem string `json:"virtualizationSystem"` - VirtualizationRole string `json:"virtualizationRole"` - HostID string `json:"hostid"` + UsedPercent float64 } diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go index 59d989e86e3a7..789def20f0fe0 100644 --- a/helper/identity/mfa/types.pb.go +++ b/helper/identity/mfa/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: helper/identity/mfa/types.proto package mfa @@ -42,7 +42,6 @@ type Config struct { // @inject_tag: sentinel:"-" // // Types that are assignable to Config: - // // *Config_TOTPConfig // *Config_OktaConfig // *Config_DuoConfig @@ -590,7 +589,6 @@ type Secret struct { // @inject_tag: sentinel:"-" MethodName string `protobuf:"bytes,1,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty" sentinel:"-"` // Types that are assignable to Value: - // // *Secret_TOTPSecret Value isSecret_Value `protobuf_oneof:"value"` } diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index 7a88a74773104..a392d24bc313e 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: helper/identity/types.proto package identity diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go index 0ad0e9d876cf9..fd327bc687f85 100644 --- a/helper/metricsutil/gauge_process.go +++ b/helper/metricsutil/gauge_process.go @@ -6,7 +6,6 @@ import ( "sort" "time" - "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" ) @@ -61,7 +60,7 @@ type GaugeCollectionProcess struct { collector GaugeCollector // destination for metrics - sink Metrics + sink *ClusterMetricSink logger log.Logger // time between collections @@ -69,39 +68,10 @@ type GaugeCollectionProcess struct { currentInterval time.Duration ticker *time.Ticker - // used to help limit cardinality - maxGaugeCardinality int - // time source clock clock } -// NewGaugeCollectionProcess creates a new collection process for the callback -// function given as an argument, and starts it running. -// A label should be provided for metrics *about* this collection process. -// -// The Run() method must be called to start the process. -func NewGaugeCollectionProcess( - key []string, - id []Label, - collector GaugeCollector, - m metrics.MetricSink, - gaugeInterval time.Duration, - maxGaugeCardinality int, - logger log.Logger, -) (*GaugeCollectionProcess, error) { - return newGaugeCollectionProcessWithClock( - key, - id, - collector, - SinkWrapper{MetricSink: m}, - gaugeInterval, - maxGaugeCardinality, - logger, - defaultClock{}, - ) -} - // NewGaugeCollectionProcess creates a new collection process for the callback // function given as an argument, and starts it running. // A label should be provided for metrics *about* this collection process. 
@@ -113,48 +83,41 @@ func (m *ClusterMetricSink) NewGaugeCollectionProcess( collector GaugeCollector, logger log.Logger, ) (*GaugeCollectionProcess, error) { - return newGaugeCollectionProcessWithClock( + return m.newGaugeCollectionProcessWithClock( key, id, collector, - m, - m.GaugeInterval, - m.MaxGaugeCardinality, logger, defaultClock{}, ) } // test version allows an alternative clock implementation -func newGaugeCollectionProcessWithClock( +func (m *ClusterMetricSink) newGaugeCollectionProcessWithClock( key []string, id []Label, collector GaugeCollector, - sink Metrics, - gaugeInterval time.Duration, - maxGaugeCardinality int, logger log.Logger, clock clock, ) (*GaugeCollectionProcess, error) { process := &GaugeCollectionProcess{ - stop: make(chan struct{}, 1), - stopped: make(chan struct{}, 1), - key: key, - labels: id, - collector: collector, - sink: sink, - originalInterval: gaugeInterval, - currentInterval: gaugeInterval, - maxGaugeCardinality: maxGaugeCardinality, - logger: logger, - clock: clock, + stop: make(chan struct{}, 1), + stopped: make(chan struct{}, 1), + key: key, + labels: id, + collector: collector, + sink: m, + originalInterval: m.GaugeInterval, + currentInterval: m.GaugeInterval, + logger: logger, + clock: clock, } return process, nil } // delayStart randomly delays by up to one extra interval -// so that collection processes do not all run at the time. -// If we knew all the processes in advance, we could just schedule them +// so that collection processes do not all run at the time time. +// If we knew all the procsses in advance, we could just schedule them // evenly, but a new one could be added per secret engine. func (p *GaugeCollectionProcess) delayStart() bool { randomDelay := time.Duration(rand.Int63n(int64(p.currentInterval))) @@ -224,11 +187,11 @@ func (p *GaugeCollectionProcess) collectAndFilterGauges() { // Filter to top N. 
// This does not guarantee total cardinality is <= N, but it does slow things down // a little if the cardinality *is* too high and the gauge needs to be disabled. - if len(values) > p.maxGaugeCardinality { + if len(values) > p.sink.MaxGaugeCardinality { sort.Slice(values, func(a, b int) bool { return values[a].Value > values[b].Value }) - values = values[:p.maxGaugeCardinality] + values = values[:p.sink.MaxGaugeCardinality] } p.streamGaugesToSink(values) diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go index 9971714e04e30..89ef813a850c6 100644 --- a/helper/metricsutil/gauge_process_test.go +++ b/helper/metricsutil/gauge_process_test.go @@ -147,13 +147,10 @@ func TestGauge_StartDelay(t *testing.T) { sink := BlackholeSink() sink.GaugeInterval = 2 * time.Hour - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, c.EmptyCollectionFunction, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -212,13 +209,10 @@ func TestGauge_StoppedDuringInitialDelay(t *testing.T) { sink := BlackholeSink() sink.GaugeInterval = 2 * time.Hour - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, c.EmptyCollectionFunction, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -241,13 +235,10 @@ func TestGauge_StoppedAfterInitialDelay(t *testing.T) { sink := BlackholeSink() sink.GaugeInterval = 2 * time.Hour - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, c.EmptyCollectionFunction, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -283,13 +274,10 @@ func TestGauge_Backoff(t *testing.T) { return []GaugeLabelValues{}, nil } - p, err := 
newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, f, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -312,13 +300,10 @@ func TestGauge_RestartTimer(t *testing.T) { sink := BlackholeSink() sink.GaugeInterval = 2 * time.Hour - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, c.EmptyCollectionFunction, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -385,13 +370,10 @@ func TestGauge_InterruptedStreaming(t *testing.T) { sink.MaxGaugeCardinality = 500 sink.GaugeInterval = 2 * time.Hour - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, nil, // shouldn't be called - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -463,13 +445,10 @@ func TestGauge_MaximumMeasurements(t *testing.T) { // Advance time by 0.5% of duration advance := time.Duration(int(0.005 * float32(sink.GaugeInterval))) - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, c.makeFunctionForValues(values, s, advance), - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) @@ -545,13 +524,10 @@ func TestGauge_MeasurementError(t *testing.T) { return values, errors.New("test error") } - p, err := newGaugeCollectionProcessWithClock( + p, err := sink.newGaugeCollectionProcessWithClock( []string{"example", "count"}, []Label{{"gauge", "test"}}, f, - sink, - sink.GaugeInterval, - sink.MaxGaugeCardinality, log.Default(), s, ) diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go index de85c7e4628e9..0abb8148e3eb9 100644 --- a/helper/metricsutil/metricsutil.go 
+++ b/helper/metricsutil/metricsutil.go @@ -105,7 +105,7 @@ func (m *MetricsHelper) ResponseForFormat(format string) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ logical.HTTPContentType: ErrorContentType, - logical.HTTPRawBody: fmt.Sprintf("metric response format %q unknown", format), + logical.HTTPRawBody: fmt.Sprintf("metric response format \"%s\" unknown", format), logical.HTTPStatusCode: http.StatusBadRequest, }, } diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index 67deb3bee1cd1..dcbd42aad3f27 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -5,7 +5,7 @@ import ( "sync/atomic" "time" - "github.com/armon/go-metrics" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/vault/helper/namespace" ) @@ -49,25 +49,6 @@ type Metrics interface { var _ Metrics = &ClusterMetricSink{} -// SinkWrapper implements `metricsutil.Metrics` using an instance of -// armon/go-metrics `MetricSink` as the underlying implementation. 
-type SinkWrapper struct { - metrics.MetricSink -} - -func (s SinkWrapper) AddDurationWithLabels(key []string, d time.Duration, labels []Label) { - val := float32(d) / float32(time.Millisecond) - s.MetricSink.AddSampleWithLabels(key, val, labels) -} - -func (s SinkWrapper) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - elapsed := time.Now().Sub(start) - val := float32(elapsed) / float32(time.Millisecond) - s.MetricSink.AddSampleWithLabels(key, val, labels) -} - -var _ Metrics = SinkWrapper{} - // Convenience alias type Label = metrics.Label diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go index 93d68622dec57..90ddadefd62df 100644 --- a/helper/namespace/namespace.go +++ b/helper/namespace/namespace.go @@ -12,9 +12,8 @@ import ( type contextValues struct{} type Namespace struct { - ID string `json:"id" mapstructure:"id"` - Path string `json:"path" mapstructure:"path"` - CustomMetadata map[string]string `json:"custom_metadata" mapstructure:"custom_metadata"` + ID string `json:"id"` + Path string `json:"path"` } func (n *Namespace) String() string { @@ -29,9 +28,8 @@ var ( contextNamespace contextValues = struct{}{} ErrNoNamespace error = errors.New("no namespace") RootNamespace *Namespace = &Namespace{ - ID: RootNamespaceID, - Path: "", - CustomMetadata: make(map[string]string), + ID: RootNamespaceID, + Path: "", } ) diff --git a/helper/pgpkeys/encrypt_decrypt.go b/helper/pgpkeys/encrypt_decrypt.go index 554013d6af494..31678df69f4c1 100644 --- a/helper/pgpkeys/encrypt_decrypt.go +++ b/helper/pgpkeys/encrypt_decrypt.go @@ -5,8 +5,8 @@ import ( "encoding/base64" "fmt" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) // EncryptShares takes an ordered set of byte slices to encrypt and the diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go index e107bc9943385..4490d891dc9ed 
100644 --- a/helper/pgpkeys/flag.go +++ b/helper/pgpkeys/flag.go @@ -8,7 +8,7 @@ import ( "os" "strings" - "github.com/ProtonMail/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp" ) // PubKeyFileFlag implements flag.Value and command.Example to receive exactly diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go index ec6402d5eb2f2..214a1719b9ae6 100644 --- a/helper/pgpkeys/flag_test.go +++ b/helper/pgpkeys/flag_test.go @@ -12,8 +12,8 @@ import ( "strings" "testing" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func TestPubKeyFilesFlag_implements(t *testing.T) { @@ -68,7 +68,7 @@ func TestPubKeyFilesFlagSetBinary(t *testing.T) { t.Fatalf("err: should not have been able to set a second value") } - expected := []string{strings.ReplaceAll(pubKey1, "\n", ""), strings.ReplaceAll(pubKey2, "\n", "")} + expected := []string{strings.Replace(pubKey1, "\n", "", -1), strings.Replace(pubKey2, "\n", "", -1)} if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) { t.Fatalf("Bad: %#v", pkf) } diff --git a/helper/pgpkeys/keybase.go b/helper/pgpkeys/keybase.go index b2571b451d9b7..a9dde2bdd7aed 100644 --- a/helper/pgpkeys/keybase.go +++ b/helper/pgpkeys/keybase.go @@ -6,9 +6,9 @@ import ( "fmt" "strings" - "github.com/ProtonMail/go-crypto/openpgp" cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/keybase/go-crypto/openpgp" ) const ( diff --git a/helper/pgpkeys/keybase_test.go b/helper/pgpkeys/keybase_test.go index 3faa3f5d8db2e..c261e6f14c421 100644 --- a/helper/pgpkeys/keybase_test.go +++ b/helper/pgpkeys/keybase_test.go @@ -7,8 +7,8 @@ import ( "reflect" "testing" - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/keybase/go-crypto/openpgp" + "github.com/keybase/go-crypto/openpgp/packet" ) func 
TestFetchKeybasePubkeys(t *testing.T) { diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 6bed81c48a152..bd7b780cd5a9a 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: helper/storagepacker/types.proto package storagepacker diff --git a/helper/testhelpers/consul/consulhelper.go b/helper/testhelpers/consul/consulhelper.go index e88ead2bfad56..3facba58feb1b 100644 --- a/helper/testhelpers/consul/consulhelper.go +++ b/helper/testhelpers/consul/consulhelper.go @@ -27,7 +27,7 @@ func (c *Config) APIConfig() *consulapi.Config { // the Consul version used will be given by the environment variable // CONSUL_DOCKER_VERSION, or if that's empty, whatever we've hardcoded as the // the latest Consul version. -func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config) { +func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, bootstrap bool) (func(), *Config) { t.Helper() if retAddress := os.Getenv("CONSUL_HTTP_ADDR"); retAddress != "" { @@ -119,7 +119,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo // New default behavior var consulToken string - if doBootstrapSetup { + if bootstrap { aclbootstrap, _, err := consul.ACL().Bootstrap() if err != nil { return nil, err diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go index 4740ff6be370a..ffc801b78d5d6 100644 --- a/helper/testhelpers/logical/testing.go +++ b/helper/testhelpers/logical/testing.go @@ -120,7 +120,7 @@ func Test(tt TestT, c TestCase) { // slow and generally require some outside configuration. 
if c.AcceptanceTest && os.Getenv(TestEnvVar) == "" { tt.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env %q set", + "Acceptance tests skipped unless env '%s' set", TestEnvVar)) return } diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go index 4087f6fc0d926..7705126f992ef 100644 --- a/helper/testhelpers/seal/sealhelper.go +++ b/helper/testhelpers/seal/sealhelper.go @@ -57,7 +57,7 @@ func (tss *TransitSealServer) MakeKey(t testing.T, key string) { } } -func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, error) { +func (tss *TransitSealServer) MakeSeal(t testing.T, key string) vault.Seal { client := tss.Cores[0].Client wrapperConfig := map[string]string{ "address": client.Address(), @@ -66,7 +66,7 @@ func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, err "key_name": key, "tls_ca_cert": tss.CACertPEMFile, } - transitSeal, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + transitSeal, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig}) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 9c07ea52304a6..08e3017e32833 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -349,9 +349,6 @@ func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) { if isLeader, _, clusterAddr, _ := core.Core.Leader(); isLeader != true && clusterAddr != "" { return } - if core.Core.ActiveNodeReplicationState() == 0 { - return - } time.Sleep(time.Second) } diff --git a/helper/testhelpers/testhelpers_oss.go b/helper/testhelpers/testhelpers_oss.go index 912d50fdec3b0..05e132a921f39 100644 --- a/helper/testhelpers/testhelpers_oss.go +++ b/helper/testhelpers/testhelpers_oss.go @@ -11,9 +11,4 @@ import ( // on OSS. On enterprise it waits for perf standbys to be healthy too. 
func WaitForActiveNodeAndStandbys(t testing.T, cluster *vault.TestCluster) { WaitForActiveNode(t, cluster) - for _, core := range cluster.Cores { - if standby, _ := core.Core.Standby(); standby { - WaitForStandbyNode(t, core) - } - } } diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 257a5a0184c5f..7806f03635118 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -18,6 +18,7 @@ import ( // seal migration, wherein a given physical backend must be re-used as several // test clusters are sequentially created, tested, and discarded. type ReusableStorage struct { + // IsRaft specifies whether the storage is using a raft backend. IsRaft bool diff --git a/helper/versions/version.go b/helper/versions/version.go deleted file mode 100644 index 1fa4e36e27a79..0000000000000 --- a/helper/versions/version.go +++ /dev/null @@ -1,54 +0,0 @@ -package versions - -import ( - "fmt" - "runtime/debug" - "strings" - "sync" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/version" -) - -var ( - buildInfoOnce sync.Once // once is used to ensure we only parse build info once. - buildInfo *debug.BuildInfo - DefaultBuiltinVersion = "v" + version.GetVersion().Version + "+builtin.vault" -) - -func GetBuiltinVersion(pluginType consts.PluginType, pluginName string) string { - buildInfoOnce.Do(func() { - buildInfo, _ = debug.ReadBuildInfo() - }) - - // Should never happen, means the binary was built without Go modules. - // Fall back to just the Vault version. 
- if buildInfo == nil { - return DefaultBuiltinVersion - } - - // Vault builtin plugins are all either: - // a) An external repo within the hashicorp org - return external repo version with +builtin - // b) Within the Vault repo itself - return Vault version with +builtin.vault - // - // The repo names are predictable, but follow slightly different patterns - // for each plugin type. - t := pluginType.String() - switch pluginType { - case consts.PluginTypeDatabase: - // Database plugin built-ins are registered as e.g. "postgresql-database-plugin" - pluginName = strings.TrimSuffix(pluginName, "-database-plugin") - case consts.PluginTypeSecrets: - // Repos use "secrets", pluginType.String() is "secret". - t = "secrets" - } - pluginModulePath := fmt.Sprintf("github.com/hashicorp/vault-plugin-%s-%s", t, pluginName) - - for _, dep := range buildInfo.Deps { - if dep.Path == pluginModulePath { - return dep.Version + "+builtin" - } - } - - return DefaultBuiltinVersion -} diff --git a/http/assets.go b/http/assets.go index c401f94910872..4c4b6c395eab6 100644 --- a/http/assets.go +++ b/http/assets.go @@ -9,7 +9,6 @@ import ( ) // content is our static web server content. -// //go:embed web_ui/* var content embed.FS diff --git a/http/handler.go b/http/handler.go index 5bf848db2d678..f40ec3d72b178 100644 --- a/http/handler.go +++ b/http/handler.go @@ -1148,9 +1148,9 @@ func parseMFAHeader(req *logical.Request) error { // maintain backwards compatibility, this will err on the side of JSON. // The request will be considered a form only if: // -// 1. The content type is "application/x-www-form-urlencoded" -// 2. The start of the request doesn't look like JSON. For this test we -// we expect the body to begin with { or [, ignoring leading whitespace. +// 1. The content type is "application/x-www-form-urlencoded" +// 2. The start of the request doesn't look like JSON. For this test we +// we expect the body to begin with { or [, ignoring leading whitespace. 
func isForm(head []byte, contentType string) bool { contentType, _, err := mime.ParseMediaType(contentType) @@ -1221,8 +1221,8 @@ func oidcPermissionDenied(path string, err error) bool { // permission denied errors (expired token) on resources protected // by OIDC access tokens. Currently, the UserInfo Endpoint is the only // protected resource. See the following specifications for details: -// - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoError -// - https://datatracker.ietf.org/doc/html/rfc6750#section-3.1 +// - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoError +// - https://datatracker.ietf.org/doc/html/rfc6750#section-3.1 func respondOIDCPermissionDenied(w http.ResponseWriter) { errorCode := "invalid_token" errorDescription := logical.ErrPermissionDenied.Error() diff --git a/http/handler_test.go b/http/handler_test.go index 49565b41e2356..382c57c25056d 100644 --- a/http/handler_test.go +++ b/http/handler_test.go @@ -17,7 +17,6 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -411,12 +410,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -428,12 +424,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": 
false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -444,12 +437,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -461,12 +451,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "secret/": map[string]interface{}{ @@ -478,12 +465,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -495,12 +479,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": 
[]interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -511,12 +492,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -528,12 +506,9 @@ func TestSysMounts_headerAuth(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) diff --git a/http/logical.go b/http/logical.go index 6cdf6bb071105..2de6c32954857 100644 --- a/http/logical.go +++ b/http/logical.go @@ -71,7 +71,6 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. return nil, nil, http.StatusBadRequest, nil } if list { - queryVals.Del("list") op = logical.ListOperation if !strings.HasSuffix(path, "/") { path += "/" @@ -79,7 +78,9 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. 
} } - data = parseQuery(queryVals) + if !list { + data = parseQuery(queryVals) + } switch { case strings.HasPrefix(path, "sys/pprof/"): @@ -102,11 +103,10 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. bufferedBody := newBufferedReader(r.Body) r.Body = bufferedBody - // If we are uploading a snapshot or receiving an ocsp-request (which - // is der encoded) we don't want to parse it. Instead, we will simply - // add the HTTP request to the logical request object for later consumption. - contentType := r.Header.Get("Content-Type") - if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" || isOcspRequest(contentType) { + // If we are uploading a snapshot we don't want to parse it. Instead + // we will simply add the HTTP request to the logical request object + // for later consumption. + if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" { passHTTPReq = true origBody = r.Body } else { @@ -121,7 +121,7 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. return nil, nil, status, fmt.Errorf("error reading data") } - if isForm(head, contentType) { + if isForm(head, r.Header.Get("Content-Type")) { formData, err := parseFormRequest(r) if err != nil { status := http.StatusBadRequest @@ -178,8 +178,6 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. path += "/" } - data = parseQuery(r.URL.Query()) - case "OPTIONS", "HEAD": default: return nil, nil, http.StatusMethodNotAllowed, nil @@ -209,15 +207,6 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. 
return req, origBody, 0, nil } -func isOcspRequest(contentType string) bool { - contentType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return false - } - - return contentType == "application/ocsp-request" -} - func buildLogicalPath(r *http.Request) (string, int, error) { ns, err := namespace.FromContext(r.Context()) if err != nil { @@ -287,9 +276,9 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques // handleLogical returns a handler for processing logical requests. These requests // may or may not end up getting forwarded under certain scenarios if the node // is a performance standby. Some of these cases include: -// - Perf standby and token with limited use count. -// - Perf standby and token re-validation needed (e.g. due to invalid token). -// - Perf standby and control group error. +// - Perf standby and token with limited use count. +// - Perf standby and token re-validation needed (e.g. due to invalid token). +// - Perf standby and control group error. 
func handleLogical(core *vault.Core) http.Handler { return handleLogicalInternal(core, false, false) } diff --git a/http/logical_test.go b/http/logical_test.go index fc6fc765811e5..5580e13082f76 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -362,85 +362,6 @@ func TestLogical_ListSuffix(t *testing.T) { } } -func TestLogical_ListWithQueryParameters(t *testing.T) { - core, _, rootToken := vault.TestCoreUnsealed(t) - - tests := []struct { - name string - requestMethod string - url string - expectedData map[string]interface{} - }{ - { - name: "LIST request method parses query parameter", - requestMethod: "LIST", - url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1", - expectedData: map[string]interface{}{ - "key1": "value1", - }, - }, - { - name: "LIST request method parses query multiple parameters", - requestMethod: "LIST", - url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1&key2=value2", - expectedData: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - }, - }, - { - name: "GET request method with list=true parses query parameter", - requestMethod: "GET", - url: "http://127.0.0.1:8200/v1/secret/foo?list=true&key1=value1", - expectedData: map[string]interface{}{ - "key1": "value1", - }, - }, - { - name: "GET request method with list=true parses multiple query parameters", - requestMethod: "GET", - url: "http://127.0.0.1:8200/v1/secret/foo?list=true&key1=value1&key2=value2", - expectedData: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - }, - }, - { - name: "GET request method with alternate order list=true parses multiple query parameters", - requestMethod: "GET", - url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1&list=true&key2=value2", - expectedData: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - req, _ := http.NewRequest(tc.requestMethod, tc.url, nil) - req = req.WithContext(namespace.RootContext(nil)) - 
req.Header.Add(consts.AuthHeaderName, rootToken) - - lreq, _, status, err := buildLogicalRequest(core, nil, req) - if err != nil { - t.Fatal(err) - } - if status != 0 { - t.Fatalf("got status %d", status) - } - if !strings.HasSuffix(lreq.Path, "/") { - t.Fatal("trailing slash not found on path") - } - if lreq.Operation != logical.ListOperation { - t.Fatalf("expected logical.ListOperation, got %v", lreq.Operation) - } - if !reflect.DeepEqual(tc.expectedData, lreq.Data) { - t.Fatalf("expected query parameter data %v, got %v", tc.expectedData, lreq.Data) - } - }) - } -} - func TestLogical_RespondWithStatusCode(t *testing.T) { resp := &logical.Response{ Data: map[string]interface{}{ @@ -634,23 +555,10 @@ func TestLogical_AuditPort(t *testing.T) { }, } - // workaround kv-v2 initialization upgrade errors - numFailures := 0 - vault.RetryUntil(t, 10*time.Second, func() error { - resp, err := c.Logical().Write("kv/data/foo", writeData) - if err != nil { - if strings.Contains(err.Error(), "Upgrading from non-versioned to versioned data") { - t.Logf("Retrying fetch KV data due to upgrade error") - time.Sleep(100 * time.Millisecond) - numFailures += 1 - return err - } - - t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) - } - - return nil - }) + resp, err := c.Logical().Write("kv/data/foo", writeData) + if err != nil { + t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) + } decoder := json.NewDecoder(auditLogFile) @@ -671,7 +579,7 @@ func TestLogical_AuditPort(t *testing.T) { } if _, ok := auditRequest["remote_address"].(string); !ok { - t.Fatalf("remote_address should be a string, not %T", auditRequest["remote_address"]) + t.Fatalf("remote_port should be a number, not %T", auditRequest["remote_address"]) } if _, ok := auditRequest["remote_port"].(float64); !ok { @@ -679,12 +587,8 @@ func TestLogical_AuditPort(t *testing.T) { } } - // We expect the following items in the audit log: - // audit log header + an entry for updating sys/audit/file 
- // + request/response per failure (if any) + request/response for creating kv - numExpectedEntries := (numFailures * 2) + 4 - if count != numExpectedEntries { - t.Fatalf("wrong number of audit entries expected: %d got: %d", numExpectedEntries, count) + if count != 4 { + t.Fatalf("wrong number of audit entries: %d", count) } } diff --git a/http/plugin_test.go b/http/plugin_test.go index 164a3d25f664d..38b8669eb1b1d 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -52,7 +52,7 @@ func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluste os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core) - vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", []string{}, "") + vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "TestPlugin_PluginMain", []string{}, "") // Mount the mock plugin err = core.Client.Sys().Mount("mock", &api.MountInput{ @@ -81,10 +81,14 @@ func TestPlugin_PluginMain(t *testing.T) { flags := apiClientMeta.FlagSet() flags.Parse(args) + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + factoryFunc := mock.FactoryType(logical.TypeLogical) err := plugin.Serve(&plugin.ServeOpts{ BackendFactoryFunc: factoryFunc, + TLSProviderFunc: tlsProviderFunc, }) if err != nil { t.Fatal(err) diff --git a/http/sys_auth_test.go b/http/sys_auth_test.go index f6b200a6f6ad7..e180959173c07 100644 --- a/http/sys_auth_test.go +++ b/http/sys_auth_test.go @@ -8,8 +8,6 @@ import ( "time" "github.com/go-test/deep" - "github.com/hashicorp/vault/helper/versions" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" ) @@ -40,12 +38,9 @@ func TestSysAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": 
interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "token/": map[string]interface{}{ @@ -58,12 +53,9 @@ func TestSysAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -96,7 +88,7 @@ func TestSysEnableAuth(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{ - "type": "approle", + "type": "noop", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -114,21 +106,17 @@ func TestSysEnableAuth(t *testing.T) { "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", - "type": "approle", + "type": "noop", "external_entropy_access": false, - "deprecation_status": "supported", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "approle"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "token/": map[string]interface{}{ "description": "token based credentials", @@ -140,31 +128,24 @@ func TestSysEnableAuth(t *testing.T) { "force_no_cache": false, "token_type": "default-service", }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), 
}, }, "foo/": map[string]interface{}{ "description": "foo", - "type": "approle", + "type": "noop", "external_entropy_access": false, - "deprecation_status": "supported", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "approle"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "token/": map[string]interface{}{ "description": "token based credentials", @@ -176,12 +157,9 @@ func TestSysEnableAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -246,9 +224,6 @@ func TestSysDisableAuth(t *testing.T) { "local": false, "seal_wrap": false, "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", }, }, "token/": map[string]interface{}{ @@ -264,9 +239,6 @@ func TestSysDisableAuth(t *testing.T) { "local": false, "seal_wrap": false, "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", }, } testResponseStatus(t, resp, 200) @@ -520,12 +492,9 @@ func TestSysRemountAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": 
map[string]interface{}{}, }, "token/": map[string]interface{}{ "description": "token based credentials", @@ -537,12 +506,9 @@ func TestSysRemountAuth(t *testing.T) { "force_no_cache": false, "token_type": "default-service", }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "bar/": map[string]interface{}{ @@ -555,12 +521,9 @@ func TestSysRemountAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "token/": map[string]interface{}{ "description": "token based credentials", @@ -572,12 +535,9 @@ func TestSysRemountAuth(t *testing.T) { "token_type": "default-service", "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": "", + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) diff --git a/http/sys_init.go b/http/sys_init.go index ae3059462bef4..b21e5363ea020 100644 --- a/http/sys_init.go +++ b/http/sys_init.go @@ -4,9 +4,7 @@ import ( "context" "encoding/base64" "encoding/hex" - "fmt" "net/http" - "strings" "github.com/hashicorp/vault/vault" ) @@ -46,12 +44,6 @@ func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) return } - // Validate init request parameters - if err := validateInitParameters(core, req); err != nil { - respondError(w, http.StatusBadRequest, err) - return - } - // Initialize barrierConfig := &vault.SealConfig{ SecretShares: req.SecretShares, @@ -136,41 
+128,3 @@ type InitResponse struct { type InitStatusResponse struct { Initialized bool `json:"initialized"` } - -// Validates if the right parameters are used based on AutoUnseal -func validateInitParameters(core *vault.Core, req InitRequest) error { - recoveryFlags := make([]string, 0) - barrierFlags := make([]string, 0) - - if req.SecretShares != 0 { - barrierFlags = append(barrierFlags, "secret_shares") - } - if req.SecretThreshold != 0 { - barrierFlags = append(barrierFlags, "secret_threshold") - } - if len(req.PGPKeys) != 0 { - barrierFlags = append(barrierFlags, "pgp_keys") - } - if req.RecoveryShares != 0 { - recoveryFlags = append(recoveryFlags, "recovery_shares") - } - if req.RecoveryThreshold != 0 { - recoveryFlags = append(recoveryFlags, "recovery_threshold") - } - if len(req.RecoveryPGPKeys) != 0 { - recoveryFlags = append(recoveryFlags, "recovery_pgp_keys") - } - - switch core.SealAccess().RecoveryKeySupported() { - case true: - if len(barrierFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierType()) - } - default: - if len(recoveryFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierType()) - } - - } - return nil -} diff --git a/http/sys_init_test.go b/http/sys_init_test.go index 4953c4244ce82..f4be3413a9022 100644 --- a/http/sys_init_test.go +++ b/http/sys_init_test.go @@ -4,15 +4,9 @@ import ( "encoding/hex" "net/http" "reflect" - "strconv" "testing" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal" ) func TestSysInit_get(t *testing.T) { @@ -129,66 +123,3 @@ func TestSysInit_put(t *testing.T) { t.Fatal("should not be sealed") } } - -func TestSysInit_Put_ValidateParams(t 
*testing.T) { - core := vault.TestCore(t) - ln, addr := TestServer(t, core) - defer ln.Close() - - resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ - "secret_shares": 5, - "secret_threshold": 3, - "recovery_shares": 5, - "recovery_threshold": 3, - }) - testResponseStatus(t, resp, http.StatusBadRequest) - body := map[string][]string{} - testResponseBody(t, resp, &body) - if body["errors"][0] != "parameters recovery_shares,recovery_threshold not applicable to seal type shamir" { - t.Fatal(body) - } -} - -func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { - testSeal := seal.NewTestSeal(nil) - autoSeal, err := vault.NewAutoSeal(testSeal) - if err != nil { - t.Fatal(err) - } - autoSeal.SetType("transit") - - // Create the transit server. - conf := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "transit": transit.Factory, - }, - Seal: autoSeal, - } - opts := &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: Handler, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(0)), - } - cluster := vault.NewTestCluster(t, conf, opts) - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - core := cores[0].Core - - ln, addr := TestServer(t, core) - defer ln.Close() - - resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ - "secret_shares": 5, - "secret_threshold": 3, - "recovery_shares": 5, - "recovery_threshold": 3, - }) - testResponseStatus(t, resp, http.StatusBadRequest) - body := map[string][]string{} - testResponseBody(t, resp, &body) - if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" { - t.Fatal(body) - } -} diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index 09ca8c8c35050..71c454a9e73e5 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -7,10 +7,9 @@ import ( "testing" "time" - "github.com/fatih/structs" "github.com/go-test/deep" - 
"github.com/hashicorp/vault/helper/versions" - "github.com/hashicorp/vault/sdk/helper/consts" + + "github.com/fatih/structs" "github.com/hashicorp/vault/vault" ) @@ -40,12 +39,9 @@ func TestSysMounts(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -57,12 +53,9 @@ func TestSysMounts(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -73,12 +66,9 @@ func TestSysMounts(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -90,12 +80,9 @@ func TestSysMounts(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "secret/": map[string]interface{}{ @@ -107,12 +94,9 @@ func TestSysMounts(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -124,12 +108,9 @@ func TestSysMounts(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -140,12 +121,9 @@ func TestSysMounts(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -157,12 +135,9 @@ func TestSysMounts(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -221,12 +196,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -237,12 +209,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -254,12 +223,9 @@ func TestSysMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -270,12 +236,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -287,12 +250,9 @@ func TestSysMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "foo/": map[string]interface{}{ @@ -304,12 +264,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -320,12 +277,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -337,12 +291,9 @@ func TestSysMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - 
"running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -353,12 +304,9 @@ func TestSysMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -370,12 +318,9 @@ func TestSysMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -469,12 +414,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -485,12 +427,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -502,12 +441,9 @@ func TestSysRemount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -518,12 +454,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -535,12 +468,9 @@ func TestSysRemount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "bar/": map[string]interface{}{ @@ -552,12 +482,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -568,12 +495,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -585,12 +509,9 @@ func TestSysRemount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -601,12 +522,9 @@ func TestSysRemount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -618,12 +536,9 @@ func TestSysRemount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - 
"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -682,12 +597,9 @@ func TestSysUnmount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -699,12 +611,9 @@ func TestSysUnmount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -715,12 +624,9 @@ func TestSysUnmount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -732,12 +638,9 @@ func TestSysUnmount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - 
"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "secret/": map[string]interface{}{ @@ -749,12 +652,9 @@ func TestSysUnmount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -766,12 +666,9 @@ func TestSysUnmount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -782,12 +679,9 @@ func TestSysUnmount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -799,12 +693,9 @@ func TestSysUnmount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - 
"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -949,12 +840,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -965,12 +853,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -982,12 +867,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -998,12 +880,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - 
"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -1015,12 +894,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "foo/": map[string]interface{}{ @@ -1032,12 +908,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -1048,12 +921,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -1065,12 +935,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", 
- "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -1081,12 +948,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -1098,12 +962,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } testResponseStatus(t, resp, 200) @@ -1188,12 +1049,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("259200000"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -1204,12 +1062,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - 
"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -1221,12 +1076,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.DefaultBuiltinVersion, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -1237,12 +1089,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -1254,12 +1103,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, }, "foo/": map[string]interface{}{ @@ -1271,12 +1117,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("259200000"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", 
- "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -1287,12 +1130,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": false, - "seal_wrap": false, - "options": map[string]interface{}{"version": "1"}, - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, }, "sys/": map[string]interface{}{ "description": "system endpoints used for control, policy and debugging", @@ -1304,12 +1144,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Accept"}, }, - "local": false, - "seal_wrap": true, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + "local": false, + "seal_wrap": true, + "options": interface{}(nil), }, "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", @@ -1320,12 +1157,9 @@ func TestSysTuneMount(t *testing.T) { "max_lease_ttl": json.Number("0"), "force_no_cache": false, }, - "local": true, - "seal_wrap": false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + "local": true, + "seal_wrap": false, + "options": interface{}(nil), }, "identity/": map[string]interface{}{ "description": "identity store", @@ -1337,12 +1171,9 @@ func TestSysTuneMount(t *testing.T) { "force_no_cache": false, "passthrough_request_headers": []interface{}{"Authorization"}, }, - "local": false, - "seal_wrap": 
false, - "options": interface{}(nil), - "plugin_version": "", - "running_sha256": "", - "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + "local": false, + "seal_wrap": false, + "options": interface{}(nil), }, } diff --git a/http/sys_raft.go b/http/sys_raft.go index 428aad4f7da3a..5db1a80fb78f6 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -68,7 +68,7 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ } if req.AutoJoinScheme != "" && (req.AutoJoinScheme != "http" && req.AutoJoinScheme != "https") { - respondError(w, http.StatusBadRequest, fmt.Errorf("invalid scheme %q; must either be http or https", req.AutoJoinScheme)) + respondError(w, http.StatusBadRequest, fmt.Errorf("invalid scheme '%s'; must either be http or https", req.AutoJoinScheme)) return } diff --git a/http/util.go b/http/util.go index b4c8923cc3eea..cbb364843c308 100644 --- a/http/util.go +++ b/http/util.go @@ -1,10 +1,7 @@ package http import ( - "bytes" - "errors" "fmt" - "io/ioutil" "net" "net/http" "strings" @@ -50,21 +47,11 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler respondError(w, status, err) return } - mountPath := strings.TrimPrefix(core.MatchingMount(r.Context(), path), ns.Path) - - // Clone body, so we do not close the request body reader - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - respondError(w, http.StatusInternalServerError, errors.New("failed to read request body")) - return - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) quotaResp, err := core.ApplyRateLimitQuota(r.Context(), "as.Request{ Type: quotas.TypeRateLimit, Path: path, - MountPath: mountPath, - Role: core.DetermineRoleFromLoginRequestFromBytes(mountPath, bodyBytes, r.Context()), + MountPath: strings.TrimPrefix(core.MatchingMount(r.Context(), path), ns.Path), NamespacePath: ns.Path, ClientAddress: parseRemoteIPAddress(r), }) diff --git 
a/internalshared/configutil/config.go b/internalshared/configutil/config.go index 5c12e03b949f1..3fcbeb6b593a3 100644 --- a/internalshared/configutil/config.go +++ b/internalshared/configutil/config.go @@ -29,8 +29,6 @@ type SharedConfig struct { Telemetry *Telemetry `hcl:"telemetry"` - HCPLinkConf *HCPLinkConfig `hcl:"cloud"` - DefaultMaxRequestDuration time.Duration `hcl:"-"` DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_duration"` @@ -141,13 +139,6 @@ func ParseConfig(d string) (*SharedConfig, error) { } } - if o := list.Filter("cloud"); len(o.Items) > 0 { - result.found("cloud", "Cloud") - if err := parseCloud(&result, o); err != nil { - return nil, fmt.Errorf("error parsing 'cloud': %w", err) - } - } - entConfig := &(result.EntSharedConfig) if err := entConfig.ParseConfig(list); err != nil { return nil, fmt.Errorf("error parsing enterprise config: %w", err) diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go index 1e9f830901c44..7f0602ef0dcad 100644 --- a/internalshared/configutil/encrypt_decrypt.go +++ b/internalshared/configutil/encrypt_decrypt.go @@ -8,7 +8,7 @@ import ( "fmt" "regexp" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" "google.golang.org/protobuf/proto" ) @@ -71,7 +71,7 @@ func EncryptDecrypt(rawStr string, decrypt, strip bool, wrapper wrapping.Wrapper if err != nil { return "", fmt.Errorf("error decoding encrypted parameter: %w", err) } - inBlob := new(wrapping.BlobInfo) + inBlob := new(wrapping.EncryptedBlobInfo) if err := proto.Unmarshal(inMsg, inBlob); err != nil { return "", fmt.Errorf("error unmarshaling encrypted parameter: %w", err) } diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go index b9257bb6c2b2d..bc4a7f5fb9404 100644 --- a/internalshared/configutil/encrypt_decrypt_test.go +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -6,7 
+6,7 @@ import ( "encoding/base64" "testing" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" "google.golang.org/protobuf/proto" ) @@ -51,7 +51,7 @@ telemetry { if err != nil { t.Fatal(err) } - inBlob := new(wrapping.BlobInfo) + inBlob := new(wrapping.EncryptedBlobInfo) if err := proto.Unmarshal(inMsg, inBlob); err != nil { t.Fatal(err) } @@ -89,24 +89,18 @@ telemetry { type reversingWrapper struct{} -func (r *reversingWrapper) Type(_ context.Context) (wrapping.WrapperType, error) { - return "reverser", nil -} -func (r *reversingWrapper) KeyId(_ context.Context) (string, error) { return "reverser", nil } -func (r *reversingWrapper) HMACKeyID() string { return "" } -func (r *reversingWrapper) Init(_ context.Context) error { return nil } -func (r *reversingWrapper) Finalize(_ context.Context) error { return nil } -func (r *reversingWrapper) SetConfig(_ context.Context, opts ...wrapping.Option) (*wrapping.WrapperConfig, error) { - return &wrapping.WrapperConfig{}, nil -} - -func (r *reversingWrapper) Encrypt(_ context.Context, input []byte, _ ...wrapping.Option) (*wrapping.BlobInfo, error) { - return &wrapping.BlobInfo{ +func (r *reversingWrapper) Type() string { return "reversing" } +func (r *reversingWrapper) KeyID() string { return "reverser" } +func (r *reversingWrapper) HMACKeyID() string { return "" } +func (r *reversingWrapper) Init(_ context.Context) error { return nil } +func (r *reversingWrapper) Finalize(_ context.Context) error { return nil } +func (r *reversingWrapper) Encrypt(_ context.Context, input []byte, _ []byte) (*wrapping.EncryptedBlobInfo, error) { + return &wrapping.EncryptedBlobInfo{ Ciphertext: r.reverse(input), }, nil } -func (r *reversingWrapper) Decrypt(_ context.Context, input *wrapping.BlobInfo, _ ...wrapping.Option) ([]byte, error) { +func (r *reversingWrapper) Decrypt(_ context.Context, input *wrapping.EncryptedBlobInfo, _ []byte) ([]byte, error) { return r.reverse(input.Ciphertext), 
nil } diff --git a/internalshared/configutil/hcp_link.go b/internalshared/configutil/hcp_link.go deleted file mode 100644 index a46c3bb1f5530..0000000000000 --- a/internalshared/configutil/hcp_link.go +++ /dev/null @@ -1,70 +0,0 @@ -package configutil - -import ( - "fmt" - "os" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - sdkResource "github.com/hashicorp/hcp-sdk-go/resource" -) - -// HCPLinkConfig is the HCP Link configuration for the server. -type HCPLinkConfig struct { - UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` - - ResourceIDRaw string `hcl:"resource_id"` - Resource *sdkResource.Resource `hcl:"-"` - EnableAPICapability bool `hcl:"enable_api_capability"` - EnablePassThroughCapability bool `hcl:"enable_passthrough_capability"` - ClientID string `hcl:"client_id"` - ClientSecret string `hcl:"client_secret"` -} - -func parseCloud(result *SharedConfig, list *ast.ObjectList) error { - if len(list.Items) > 1 { - return fmt.Errorf("only one 'cloud' block is permitted") - } - - // Get our one item - item := list.Items[0] - - if result.HCPLinkConf == nil { - result.HCPLinkConf = &HCPLinkConfig{} - } - - if err := hcl.DecodeObject(&result.HCPLinkConf, item.Val); err != nil { - return multierror.Prefix(err, "cloud:") - } - - // let's check if the Client ID and Secret are set in the environment - if envClientID := os.Getenv("HCP_CLIENT_ID"); envClientID != "" { - result.HCPLinkConf.ClientID = envClientID - } - if envClientSecret := os.Getenv("HCP_CLIENT_SECRET"); envClientSecret != "" { - result.HCPLinkConf.ClientSecret = envClientSecret - } - - // three pieces are necessary if the cloud stanza is configured - if result.HCPLinkConf.ResourceIDRaw == "" || result.HCPLinkConf.ClientID == "" || result.HCPLinkConf.ClientSecret == "" { - return multierror.Prefix(fmt.Errorf("failed to find the required cloud stanza configurations. 
all resource ID, client ID and client secret are required"), "cloud:") - } - - res, err := sdkResource.FromString(result.HCPLinkConf.ResourceIDRaw) - if err != nil { - return multierror.Prefix(fmt.Errorf("failed to parse resource_id for HCP Link"), "cloud:") - } - result.HCPLinkConf.Resource = &res - - // ENV var takes precedence over the config value - if apiCapEnv := os.Getenv("HCP_LINK_ENABLE_API_CAPABILITY"); apiCapEnv != "" { - result.HCPLinkConf.EnableAPICapability = true - } - - if passthroughCapEnv := os.Getenv("HCP_LINK_ENABLE_PASSTHROUGH_CAPABILITY"); passthroughCapEnv != "" { - result.HCPLinkConf.EnablePassThroughCapability = true - } - - return nil -} diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go index 78da77662b47e..9f6d74899a9dd 100644 --- a/internalshared/configutil/kms.go +++ b/internalshared/configutil/kms.go @@ -1,7 +1,6 @@ package configutil import ( - "context" "crypto/rand" "fmt" "io" @@ -9,14 +8,14 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2" - "github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" + "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms" + "github.com/hashicorp/go-kms-wrapping/wrappers/awskms" + "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault" + "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms" + "github.com/hashicorp/go-kms-wrapping/wrappers/ocikms" + 
"github.com/hashicorp/go-kms-wrapping/wrappers/transit" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" @@ -160,37 +159,41 @@ func ParseKMSes(d string) ([]*KMS, error) { return result.Seals, nil } -func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]string, logger hclog.Logger, opts ...wrapping.Option) (wrapping.Wrapper, error) { +func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]string, logger hclog.Logger) (wrapping.Wrapper, error) { var wrapper wrapping.Wrapper var kmsInfo map[string]string var err error - switch wrapping.WrapperType(configKMS.Type) { - case wrapping.WrapperTypeShamir: + opts := &wrapping.WrapperOptions{ + Logger: logger, + } + + switch configKMS.Type { + case wrapping.Shamir: return nil, nil - case wrapping.WrapperTypeAead: - wrapper, kmsInfo, err = GetAEADKMSFunc(configKMS, opts...) + case wrapping.AEAD: + wrapper, kmsInfo, err = GetAEADKMSFunc(opts, configKMS) - case wrapping.WrapperTypeAliCloudKms: - wrapper, kmsInfo, err = GetAliCloudKMSFunc(configKMS, opts...) + case wrapping.AliCloudKMS: + wrapper, kmsInfo, err = GetAliCloudKMSFunc(opts, configKMS) - case wrapping.WrapperTypeAwsKms: - wrapper, kmsInfo, err = GetAWSKMSFunc(configKMS, opts...) + case wrapping.AWSKMS: + wrapper, kmsInfo, err = GetAWSKMSFunc(opts, configKMS) - case wrapping.WrapperTypeAzureKeyVault: - wrapper, kmsInfo, err = GetAzureKeyVaultKMSFunc(configKMS, opts...) + case wrapping.AzureKeyVault: + wrapper, kmsInfo, err = GetAzureKeyVaultKMSFunc(opts, configKMS) - case wrapping.WrapperTypeGcpCkms: - wrapper, kmsInfo, err = GetGCPCKMSKMSFunc(configKMS, opts...) + case wrapping.GCPCKMS: + wrapper, kmsInfo, err = GetGCPCKMSKMSFunc(opts, configKMS) - case wrapping.WrapperTypeOciKms: - wrapper, kmsInfo, err = GetOCIKMSKMSFunc(configKMS, opts...) 
+ case wrapping.OCIKMS: + wrapper, kmsInfo, err = GetOCIKMSKMSFunc(opts, configKMS) - case wrapping.WrapperTypeTransit: - wrapper, kmsInfo, err = GetTransitKMSFunc(configKMS, opts...) + case wrapping.Transit: + wrapper, kmsInfo, err = GetTransitKMSFunc(opts, configKMS) - case wrapping.WrapperTypePkcs11: + case wrapping.PKCS11: return nil, fmt.Errorf("KMS type 'pkcs11' requires the Vault Enterprise HSM binary") default: @@ -211,9 +214,9 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin return wrapper, nil } -func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := aeadwrapper.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), opts...) +func GetAEADKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := aeadwrapper.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { return nil, nil, err } @@ -223,14 +226,14 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st if len(kms.Purpose) > 0 { str = fmt.Sprintf("%v %s", kms.Purpose, str) } - info[str] = wrapperInfo.Metadata["aead_type"] + info[str] = wrapperInfo["aead_type"] } return wrapper, info, nil } -func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := alicloudkms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +func GetAliCloudKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := alicloudkms.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -239,18 +242,18 @@ func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, 
ma } info := make(map[string]string) if wrapperInfo != nil { - info["AliCloud KMS Region"] = wrapperInfo.Metadata["region"] - info["AliCloud KMS KeyID"] = wrapperInfo.Metadata["kms_key_id"] - if domain, ok := wrapperInfo.Metadata["domain"]; ok { + info["AliCloud KMS Region"] = wrapperInfo["region"] + info["AliCloud KMS KeyID"] = wrapperInfo["kms_key_id"] + if domain, ok := wrapperInfo["domain"]; ok { info["AliCloud KMS Domain"] = domain } } return wrapper, info, nil } -var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := awskms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +var GetAWSKMSFunc = func(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := awskms.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -259,18 +262,18 @@ var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, m } info := make(map[string]string) if wrapperInfo != nil { - info["AWS KMS Region"] = wrapperInfo.Metadata["region"] - info["AWS KMS KeyID"] = wrapperInfo.Metadata["kms_key_id"] - if endpoint, ok := wrapperInfo.Metadata["endpoint"]; ok { + info["AWS KMS Region"] = wrapperInfo["region"] + info["AWS KMS KeyID"] = wrapperInfo["kms_key_id"] + if endpoint, ok := wrapperInfo["endpoint"]; ok { info["AWS KMS Endpoint"] = endpoint } } return wrapper, info, nil } -func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := azurekeyvault.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +func GetAzureKeyVaultKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { 
+ wrapper := azurekeyvault.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -279,16 +282,16 @@ func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe } info := make(map[string]string) if wrapperInfo != nil { - info["Azure Environment"] = wrapperInfo.Metadata["environment"] - info["Azure Vault Name"] = wrapperInfo.Metadata["vault_name"] - info["Azure Key Name"] = wrapperInfo.Metadata["key_name"] + info["Azure Environment"] = wrapperInfo["environment"] + info["Azure Vault Name"] = wrapperInfo["vault_name"] + info["Azure Key Name"] = wrapperInfo["key_name"] } return wrapper, info, nil } -func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := gcpckms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +func GetGCPCKMSKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := gcpckms.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -297,33 +300,33 @@ func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map } info := make(map[string]string) if wrapperInfo != nil { - info["GCP KMS Project"] = wrapperInfo.Metadata["project"] - info["GCP KMS Region"] = wrapperInfo.Metadata["region"] - info["GCP KMS Key Ring"] = wrapperInfo.Metadata["key_ring"] - info["GCP KMS Crypto Key"] = wrapperInfo.Metadata["crypto_key"] + info["GCP KMS Project"] = wrapperInfo["project"] + info["GCP KMS Region"] = wrapperInfo["region"] + info["GCP KMS Key Ring"] = wrapperInfo["key_ring"] + info["GCP KMS Crypto Key"] = 
wrapperInfo["crypto_key"] } return wrapper, info, nil } -func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := ocikms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +func GetOCIKMSKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := ocikms.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { return nil, nil, err } info := make(map[string]string) if wrapperInfo != nil { - info["OCI KMS KeyID"] = wrapperInfo.Metadata[ocikms.KmsConfigKeyId] - info["OCI KMS Crypto Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigCryptoEndpoint] - info["OCI KMS Management Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigManagementEndpoint] - info["OCI KMS Principal Type"] = wrapperInfo.Metadata["principal_type"] + info["OCI KMS KeyID"] = wrapperInfo[ocikms.KMSConfigKeyID] + info["OCI KMS Crypto Endpoint"] = wrapperInfo[ocikms.KMSConfigCryptoEndpoint] + info["OCI KMS Management Endpoint"] = wrapperInfo[ocikms.KMSConfigManagementEndpoint] + info["OCI KMS Principal Type"] = wrapperInfo["principal_type"] } return wrapper, info, nil } -var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { - wrapper := transit.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), wrapping.WithConfigMap(kms.Config)) +var GetTransitKMSFunc = func(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) { + wrapper := transit.NewWrapper(opts) + wrapperInfo, err := wrapper.SetConfig(kms.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -332,10 +335,10 @@ var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe } info := make(map[string]string) if 
wrapperInfo != nil { - info["Transit Address"] = wrapperInfo.Metadata["address"] - info["Transit Mount Path"] = wrapperInfo.Metadata["mount_path"] - info["Transit Key Name"] = wrapperInfo.Metadata["key_name"] - if namespace, ok := wrapperInfo.Metadata["namespace"]; ok { + info["Transit Address"] = wrapperInfo["address"] + info["Transit Mount Path"] = wrapperInfo["mount_path"] + info["Transit Key Name"] = wrapperInfo["key_name"] + if namespace, ok := wrapperInfo["namespace"]; ok { info["Transit Namespace"] = namespace } } diff --git a/internalshared/configutil/merge.go b/internalshared/configutil/merge.go index 8ae99ca4879d7..51f6fee887599 100644 --- a/internalshared/configutil/merge.go +++ b/internalshared/configutil/merge.go @@ -14,11 +14,6 @@ func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { result.Listeners = append(result.Listeners, l) } - result.HCPLinkConf = c.HCPLinkConf - if c2.HCPLinkConf != nil { - result.HCPLinkConf = c2.HCPLinkConf - } - result.Entropy = c.Entropy if c2.Entropy != nil { result.Entropy = c2.Entropy diff --git a/internalshared/listenerutil/listener.go b/internalshared/listenerutil/listener.go index 6095713be5d24..c5b77f287c8e2 100644 --- a/internalshared/listenerutil/listener.go +++ b/internalshared/listenerutil/listener.go @@ -75,8 +75,7 @@ func UnixSocketListener(path string, unixSocketsConfig *UnixSocketsConfig) (net. func TLSConfig( l *configutil.Listener, props map[string]string, - ui cli.Ui, -) (*tls.Config, reloadutil.ReloadFunc, error) { + ui cli.Ui) (*tls.Config, reloadutil.ReloadFunc, error) { props["tls"] = "disabled" if l.TLSDisable { diff --git a/make.bat b/make.bat index ca3238bc12e4c..d636b5b03b592 100644 --- a/make.bat +++ b/make.bat @@ -7,7 +7,7 @@ REM If no target is provided, default to test. 
if [%1]==[] goto test set _TARGETS=bin,bootstrap,dev,generate,test,testacc,testrace,vet -set _EXTERNAL_TOOLS=github.com/kardianos/govendor +set _EXTERNAL_TOOLS=github.com/mitchellh/gox,github.com/kardianos/govendor REM Run target. for %%a in (%_TARGETS%) do (if x%1==x%%a goto %%a) @@ -82,7 +82,7 @@ REM any common errors. go tool vet 2>nul if %ERRORLEVEL% equ 3 go get golang.org/x/tools/cmd/vet - + set _vetExitCode=0 set _VAULT_PKG_DIRS=%TEMP%\vault-pkg-dirs.txt diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index 84c2ab149db23..f20b992055601 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -102,14 +102,6 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba cluster.Port = port cluster.Keyspace = keyspace - if retryCountStr, ok := conf["simple_retry_policy_retries"]; ok { - retryCount, err := strconv.Atoi(retryCountStr) - if err != nil || retryCount <= 0 { - return nil, fmt.Errorf("'simple_retry_policy_retries' must be a positive integer") - } - cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: retryCount} - } - cluster.ProtoVersion = 2 if protoVersionStr, ok := conf["protocol_version"]; ok { protoVersion, err := strconv.Atoi(protoVersionStr) @@ -130,18 +122,10 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba cluster.Authenticator = authenticator } - if initialConnectionTimeoutStr, ok := conf["initial_connection_timeout"]; ok { - initialConnectionTimeout, err := strconv.Atoi(initialConnectionTimeoutStr) - if err != nil || initialConnectionTimeout <= 0 { - return nil, fmt.Errorf("'initial_connection_timeout' must be a positive integer") - } - cluster.ConnectTimeout = time.Duration(initialConnectionTimeout) * time.Second - } - if connTimeoutStr, ok := conf["connection_timeout"]; ok { connectionTimeout, err := strconv.Atoi(connTimeoutStr) - if err != nil || connectionTimeout <= 0 { - return nil, fmt.Errorf("'connection_timeout' 
must be a positive integer") + if err != nil { + return nil, fmt.Errorf("'connection_timeout' must be an integer") } cluster.Timeout = time.Duration(connectionTimeout) * time.Second } diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index e9fe7bc059a5b..ea1e4e1296a06 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -25,11 +25,8 @@ func TestCassandraBackend(t *testing.T) { // Run vault tests logger := logging.NewVaultLogger(log.Debug) b, err := NewCassandraBackend(map[string]string{ - "hosts": host.ConnectionURL(), - "protocol_version": "3", - "connection_timeout": "5", - "initial_connection_timeout": "5", - "simple_retry_policy_retries": "3", + "hosts": host.ConnectionURL(), + "protocol_version": "3", }, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go index 385074d917cbd..e6bfc17416df1 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -304,8 +304,8 @@ func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []*physical.TxnEntry) // https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers // // - All values that accept an identifier must: -// - Begin with a Unicode letter or an underscore (_). Subsequent characters can be letters, -// - underscores, digits (0-9), or dollar signs ($). +// - Begin with a Unicode letter or an underscore (_). Subsequent characters can be letters, +// - underscores, digits (0-9), or dollar signs ($). // - Not equal any SQL keyword unless the keyword is accepted by the element's syntax. For example, // name accepts Unreserved or Column Name keywords. 
// diff --git a/physical/consul/consul.go b/physical/consul/consul.go index f30403468c6bb..66c770486d4a4 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -46,11 +46,11 @@ type ConsulBackend struct { client *api.Client path string kv *api.KV - txn *api.Txn permitPool *physical.PermitPool consistencyMode string - sessionTTL string - lockWaitTime time.Duration + + sessionTTL string + lockWaitTime time.Duration } // NewConsulBackend constructs a Consul backend using the given API client @@ -139,19 +139,17 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe return nil, fmt.Errorf("client setup failed: %w", err) } - // Set up the backend + // Setup the backend c := &ConsulBackend{ path: path, client: client, kv: client.KV(), - txn: client.Txn(), permitPool: physical.NewPermitPool(maxParInt), consistencyMode: consistencyMode, sessionTTL: sessionTTL, lockWaitTime: lockWaitTime, } - return c, nil } @@ -224,95 +222,55 @@ func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string return nil } -// Transaction is used to run multiple entries via a transaction. 
+// Used to run multiple entries via a transaction func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { if len(txns) == 0 { return nil } defer metrics.MeasureSince([]string{"consul", "transaction"}, time.Now()) - ops := make([]*api.TxnOp, 0, len(txns)) - for _, t := range txns { - o, err := c.makeApiTxn(t) - if err != nil { - return fmt.Errorf("error converting physical transactions into api transactions: %w", err) + ops := make([]*api.KVTxnOp, 0, len(txns)) + + for _, op := range txns { + cop := &api.KVTxnOp{ + Key: c.path + op.Entry.Key, + } + switch op.Operation { + case physical.DeleteOperation: + cop.Verb = api.KVDelete + case physical.PutOperation: + cop.Verb = api.KVSet + cop.Value = op.Entry.Value + default: + return fmt.Errorf("%q is not a supported transaction operation", op.Operation) } - ops = append(ops, o) + ops = append(ops, cop) } c.permitPool.Acquire() defer c.permitPool.Release() - var retErr *multierror.Error - kvMap := make(map[string][]byte, 0) - queryOpts := &api.QueryOptions{} queryOpts = queryOpts.WithContext(ctx) - ok, resp, _, err := c.txn.Txn(ops, queryOpts) + ok, resp, _, err := c.kv.Txn(ops, queryOpts) if err != nil { if strings.Contains(err.Error(), "is too large") { return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) } return err } - if ok && len(resp.Errors) == 0 { - // Loop over results and cache them in a map. Note that we're only caching the first time we see a key, - // which _should_ correspond to a Get operation, since we expect those come first in our txns slice. - for _, txnr := range resp.Results { - if len(txnr.KV.Value) > 0 { - // We need to trim the Consul kv path (typically "vault/") from the key otherwise it won't - // match the transaction entries we have. 
- key := strings.TrimPrefix(txnr.KV.Key, c.path) - if _, found := kvMap[key]; !found { - kvMap[key] = txnr.KV.Value - } - } - } - } - - if len(resp.Errors) > 0 { - for _, res := range resp.Errors { - retErr = multierror.Append(retErr, errors.New(res.What)) - } - } - - if retErr != nil { - return retErr - } - - // Loop over our get transactions and populate any values found in our map cache. - for _, t := range txns { - if val, ok := kvMap[t.Entry.Key]; ok && t.Operation == physical.GetOperation { - newVal := make([]byte, len(val)) - copy(newVal, val) - t.Entry.Value = newVal - } + return nil } - return nil -} - -func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { - op := &api.KVTxnOp{ - Key: c.path + txn.Entry.Key, - } - switch txn.Operation { - case physical.GetOperation: - // TODO: This is currently broken. Once Consul releases 1.14, this should be updated to use api.KVGetOrEmpty - op.Verb = api.KVGet - case physical.DeleteOperation: - op.Verb = api.KVDelete - case physical.PutOperation: - op.Verb = api.KVSet - op.Value = txn.Entry.Value - default: - return nil, fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + var retErr *multierror.Error + for _, res := range resp.Errors { + retErr = multierror.Append(retErr, errors.New(res.What)) } - return &api.TxnOp{KV: op}, nil + return retErr } // Put is used to insert or update an entry @@ -409,7 +367,7 @@ func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, erro return out, err } -// LockWith is used for mutual exclusion based on the given key. +// Lock is used for mutual exclusion based on the given key. 
func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { // Create the lock opts := &api.LockOptions{ diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index b2a06e612520b..60a185287c87c 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -1,9 +1,7 @@ package consul import ( - "bytes" "context" - "encoding/hex" "fmt" "math/rand" "reflect" @@ -251,158 +249,6 @@ func TestConsul_TooLarge(t *testing.T) { } } -func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *testing.T) { - // TODO: unskip this after Consul releases 1.14 and we update our API dep. It currently fails but should pass with Consul 1.14 - t.SkipNow() - - cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) - defer cleanup() - - client, err := api.NewClient(config.APIConfig()) - if err != nil { - t.Fatal(err) - } - - txns := make([]*physical.TxnEntry, 0) - ctx := context.Background() - logger := logging.NewVaultLogger(log.Debug) - backendConfig := map[string]string{ - "address": config.Address(), - "token": config.Token, - "path": "vault/", - "max_parallel": "-1", - } - - be, err := NewConsulBackend(backendConfig, logger) - if err != nil { - t.Fatal(err) - } - b := be.(*ConsulBackend) - - defer func() { - _, _ = client.KV().DeleteTree("foo/", nil) - }() - - txns = append(txns, &physical.TxnEntry{ - Operation: physical.GetOperation, - Entry: &physical.Entry{ - Key: "foo/bar", - }, - }) - txns = append(txns, &physical.TxnEntry{ - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: "foo/bar", - Value: []byte("baz"), - }, - }) - - err = b.Transaction(ctx, txns) - if err != nil { - t.Fatal(err) - } - - // This should return nil, because the key foo/bar didn't exist when we ran that transaction, so the get - // should return nil, and the put always returns nil - for _, txn := range txns { - if txn.Operation == physical.GetOperation { - if txn.Entry.Value != nil { - 
t.Fatalf("expected txn.entry.value to be nil but it was %q", string(txn.Entry.Value)) - } - } - } -} - -// TestConsul_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the -// consul backend will populate values for any transactions that are Get operations. -func TestConsul_TransactionalBackend_GetTransactions(t *testing.T) { - // TODO: unskip this after Consul releases 1.14 and we update our API dep. It currently fails but should pass with Consul 1.14 - t.SkipNow() - - cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) - defer cleanup() - - client, err := api.NewClient(config.APIConfig()) - if err != nil { - t.Fatal(err) - } - - txns := make([]*physical.TxnEntry, 0) - ctx := context.Background() - logger := logging.NewVaultLogger(log.Debug) - backendConfig := map[string]string{ - "address": config.Address(), - "token": config.Token, - "path": "vault/", - "max_parallel": "-1", - } - - be, err := NewConsulBackend(backendConfig, logger) - if err != nil { - t.Fatal(err) - } - b := be.(*ConsulBackend) - - defer func() { - _, _ = client.KV().DeleteTree("foo/", nil) - }() - - // Add some seed values to consul, and prepare our slice of transactions at the same time - for i := 0; i < 64; i++ { - key := fmt.Sprintf("foo/lol-%d", i) - err := b.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) - if err != nil { - t.Fatal(err) - } - - txns = append(txns, &physical.TxnEntry{ - Operation: physical.GetOperation, - Entry: &physical.Entry{ - Key: key, - }, - }) - } - - for i := 0; i < 64; i++ { - key := fmt.Sprintf("foo/lol-%d", i) - if i%2 == 0 { - txns = append(txns, &physical.TxnEntry{ - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: key, - Value: []byte("lmao"), - }, - }) - } else { - txns = append(txns, &physical.TxnEntry{ - Operation: physical.DeleteOperation, - Entry: &physical.Entry{ - Key: key, - }, - }) - } - } - - if len(txns) != 128 { - t.Fatal("wrong number of 
transactions") - } - - err = b.Transaction(ctx, txns) - if err != nil { - t.Fatal(err) - } - - // Check that our Get operations were populated with their values - for i, txn := range txns { - if txn.Operation == physical.GetOperation { - val := []byte(fmt.Sprintf("value-%d", i)) - if !bytes.Equal(val, txn.Entry.Value) { - t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) - } - } - } -} - func TestConsulHABackend(t *testing.T) { cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) defer cleanup() diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index 56305b2fbf7df..e6fe596b081ae 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -122,8 +122,8 @@ func decoratePath(path string) ([]byte, error) { // Turn a decorated byte array back into a path string func undecoratePath(decoratedPath []byte) string { - ret := strings.ReplaceAll(string(decoratedPath), dirPathMarker, "/") - ret = strings.ReplaceAll(ret, dirEntryMarker, "/") + ret := strings.Replace(string(decoratedPath), dirPathMarker, "/", -1) + ret = strings.Replace(ret, dirEntryMarker, "/", -1) return strings.TrimLeft(ret, "/") } @@ -233,12 +233,12 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, db, err := fdb.Open(fdbClusterFile, []byte("DB")) if err != nil { - return nil, fmt.Errorf("failed to open database with cluster file %q: %w", fdbClusterFile, err) + return nil, fmt.Errorf("failed to open database with cluster file '%s': %w", fdbClusterFile, err) } topDir, err := directory.CreateOrOpen(db, dirPath, nil) if err != nil { - return nil, fmt.Errorf("failed to create/open top-level directory %q: %w", path, err) + return nil, fmt.Errorf("failed to create/open top-level directory '%s': %w", path, err) } // Setup the backend diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 
3a8e45d981905..7ad57a0f48c3d 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -321,10 +321,9 @@ OUTER: // // - lock does not exist // - write the lock -// // - lock exists // - if key is empty or identity is the same or timestamp exceeds TTL -// - update the lock to self +// - update the lock to self func (l *Lock) writeLock() (bool, error) { // Create a transaction to read and the update (maybe) ctx, cancel := context.WithCancel(context.Background()) diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index ed4c883440cab..b7b1d072baf77 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -36,9 +36,11 @@ const ( // Verify PostgreSQLBackend satisfies the correct interfaces var _ physical.Backend = (*PostgreSQLBackend)(nil) +// // HA backend was implemented based on the DynamoDB backend pattern // With distinction using central postgres clock, hereby avoiding // possible issues with multiple clocks +// var ( _ physical.HABackend = (*PostgreSQLBackend)(nil) _ physical.Lock = (*PostgreSQLLock)(nil) @@ -242,7 +244,7 @@ func (m *PostgreSQLBackend) Put(ctx context.Context, entry *physical.Entry) erro parentPath, path, key := m.splitKey(entry.Key) - _, err := m.client.ExecContext(ctx, m.put_query, parentPath, path, key, entry.Value) + _, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value) if err != nil { return err } @@ -259,7 +261,7 @@ func (m *PostgreSQLBackend) Get(ctx context.Context, fullPath string) (*physical _, path, key := m.splitKey(fullPath) var result []byte - err := m.client.QueryRowContext(ctx, m.get_query, path, key).Scan(&result) + err := m.client.QueryRow(m.get_query, path, key).Scan(&result) if err == sql.ErrNoRows { return nil, nil } @@ -283,7 +285,7 @@ func (m *PostgreSQLBackend) Delete(ctx context.Context, fullPath string) error { _, path, key := m.splitKey(fullPath) - _, err := m.client.ExecContext(ctx, m.delete_query, path, key) + _, err := 
m.client.Exec(m.delete_query, path, key) if err != nil { return err } @@ -298,7 +300,7 @@ func (m *PostgreSQLBackend) List(ctx context.Context, prefix string) ([]string, m.permitPool.Acquire() defer m.permitPool.Release() - rows, err := m.client.QueryContext(ctx, m.list_query, "/"+prefix) + rows, err := m.client.Query(m.list_query, "/"+prefix) if err != nil { return nil, err } diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 15d1ab35076d9..22476de55580c 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -106,7 +106,7 @@ func TestPostgreSQLBackendMaxIdleConnectionsParameter(t *testing.T) { } expectedErrStr := "failed parsing max_idle_connections parameter: strconv.Atoi: parsing \"bad param\": invalid syntax" if err.Error() != expectedErrStr { - t.Errorf("Expected: %q but found %q", expectedErrStr, err.Error()) + t.Errorf("Expected: \"%s\" but found \"%s\"", expectedErrStr, err.Error()) } } @@ -165,7 +165,7 @@ func TestConnectionURL(t *testing.T) { got := connectionURL(tt.input.conf) if got != tt.want { - t.Errorf("connectionURL(%s): want %q, got %q", tt.input, tt.want, got) + t.Errorf("connectionURL(%s): want '%s', got '%s'", tt.input, tt.want, got) } }) } diff --git a/physical/raft/bolt_linux.go b/physical/raft/bolt_linux.go index 4ea13e2a39860..03db74bc475c0 100644 --- a/physical/raft/bolt_linux.go +++ b/physical/raft/bolt_linux.go @@ -4,7 +4,7 @@ import ( "context" "os" - "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/mem" "golang.org/x/sys/unix" ) diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index 8d5b5524db12c..29ca39a7f2a1e 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -3,7 +3,6 @@ package raft import ( "bytes" "context" - "encoding/hex" "errors" "fmt" "io" @@ -32,7 +31,6 @@ const ( deleteOp uint32 = 1 << iota putOp restoreCallbackOp - getOp chunkingPrefix = "raftchunking/" databaseFilename = "vault.db" @@ -57,20 
+55,10 @@ var ( type restoreCallback func(context.Context) error -type FSMEntry struct { - Key string - Value []byte -} - -func (f *FSMEntry) String() string { - return fmt.Sprintf("Key: %s. Value: %s", f.Key, hex.EncodeToString(f.Value)) -} - // FSMApplyResponse is returned from an FSM apply. It indicates if the apply was -// successful or not. EntryMap contains the keys/values from the Get operations. +// successful or not. type FSMApplyResponse struct { - Success bool - EntrySlice []*FSMEntry + Success bool } // FSM is Vault's primary state storage. It writes updates to a bolt db file @@ -572,25 +560,19 @@ func (f *FSM) Transaction(ctx context.Context, txns []*physical.TxnEntry) error return nil }) - return err } // ApplyBatch will apply a set of logs to the FSM. This is called from the raft // library. func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { - numLogs := len(logs) - - if numLogs == 0 { + if len(logs) == 0 { return []interface{}{} } - // We will construct one slice per log, each slice containing another slice of results from our get ops - entrySlices := make([][]*FSMEntry, 0, numLogs) - // Do the unmarshalling first so we don't hold locks var latestConfiguration *ConfigurationValue - commands := make([]interface{}, 0, numLogs) + commands := make([]interface{}, 0, len(logs)) for _, log := range logs { switch log.Type { case raft.LogCommand: @@ -621,7 +603,7 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { var logIndex []byte var err error latestIndex, _ := f.LatestState() - lastLog := logs[numLogs-1] + lastLog := logs[len(logs)-1] if latestIndex.Index < lastLog.Index { logIndex, err = proto.Marshal(&IndexValue{ Term: lastLog.Term, @@ -643,7 +625,6 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { err = f.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(dataBucketName) for _, commandRaw := range commands { - entrySlice := make([]*FSMEntry, 0) switch command := commandRaw.(type) { case *LogData: for _, op := range 
command.Operations { @@ -653,17 +634,6 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { err = b.Put([]byte(op.Key), op.Value) case deleteOp: err = b.Delete([]byte(op.Key)) - case getOp: - fsmEntry := &FSMEntry{ - Key: op.Key, - } - val := b.Get([]byte(op.Key)) - if len(val) > 0 { - newVal := make([]byte, len(val)) - copy(newVal, val) - fsmEntry.Value = newVal - } - entrySlice = append(entrySlice, fsmEntry) case restoreCallbackOp: if f.restoreCb != nil { // Kick off the restore callback function in a go routine @@ -687,8 +657,6 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { return err } } - - entrySlices = append(entrySlices, entrySlice) } if len(logIndex) > 0 { @@ -719,12 +687,11 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { // Build the responses. The logs array is used here to ensure we reply to // all command values; even if they are not of the types we expect. This - // should futureproof this function from more log types being provided. - resp := make([]interface{}, numLogs) + // should future proof this function from more log types being provided. 
+ resp := make([]interface{}, len(logs)) for i := range logs { resp[i] = &FSMApplyResponse{ - Success: true, - EntrySlice: entrySlices[i], + Success: true, } } diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go index e80a6ce5573ff..75a7ffd872eec 100644 --- a/physical/raft/fsm_test.go +++ b/physical/raft/fsm_test.go @@ -2,7 +2,7 @@ package raft import ( "context" - "fmt" + fmt "fmt" "io/ioutil" "math/rand" "os" @@ -10,8 +10,8 @@ import ( "testing" "github.com/go-test/deep" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/go-hclog" + proto "github.com/golang/protobuf/proto" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/physical" ) @@ -38,7 +38,7 @@ func getFSM(t testing.TB) (*FSM, string) { func TestFSM_Batching(t *testing.T) { fsm, dir := getFSM(t) - defer func() { _ = os.RemoveAll(dir) }() + defer os.RemoveAll(dir) var index uint64 var term uint64 = 1 @@ -53,8 +53,8 @@ func TestFSM_Batching(t *testing.T) { Data: raft.EncodeConfiguration(raft.Configuration{ Servers: []raft.Server{ { - Address: "test", - ID: "test", + Address: raft.ServerAddress("test"), + ID: raft.ServerID("test"), }, }, }), @@ -131,7 +131,7 @@ func TestFSM_Batching(t *testing.T) { func TestFSM_List(t *testing.T) { fsm, dir := getFSM(t) - defer func() { _ = os.RemoveAll(dir) }() + defer os.RemoveAll(dir) ctx := context.Background() count := 100 diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 605357bebf3d5..21d6d93058494 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -17,7 +17,7 @@ import ( "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" log "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/go-raftchunking" "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/go-uuid" @@ -250,7 +250,7 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, 
error) { } if info.AutoJoinScheme != "" && (info.AutoJoinScheme != "http" && info.AutoJoinScheme != "https") { - return nil, fmt.Errorf("invalid scheme %q; must either be http or https", info.AutoJoinScheme) + return nil, fmt.Errorf("invalid scheme '%s'; must either be http or https", info.AutoJoinScheme) } info.Retry = true @@ -1508,8 +1508,6 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry return err } - txnMap := make(map[string]*physical.TxnEntry) - command := &LogData{ Operations: make([]*LogOperation, len(txns)), } @@ -1526,10 +1524,6 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry case physical.DeleteOperation: op.OpType = deleteOp op.Key = txn.Entry.Key - case physical.GetOperation: - op.OpType = getOp - op.Key = txn.Entry.Key - txnMap[op.Key] = txn default: return fmt.Errorf("%q is not a supported transaction operation", txn.Operation) } @@ -1543,16 +1537,6 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry b.l.RLock() err := b.applyLog(ctx, command) b.l.RUnlock() - - // loop over results and update pointers to get operations - for _, logOp := range command.Operations { - if logOp.OpType == getOp { - if txn, found := txnMap[logOp.Key]; found { - txn.Entry.Value = logOp.Value - } - } - } - return err } @@ -1617,33 +1601,10 @@ func (b *RaftBackend) applyLog(ctx context.Context, command *LogData) error { resp = chunkedSuccess.Response } - fsmar, ok := resp.(*FSMApplyResponse) - if !ok || !fsmar.Success { + if resp, ok := resp.(*FSMApplyResponse); !ok || !resp.Success { return errors.New("could not apply data") } - // populate command with our results - if fsmar.EntrySlice == nil { - return errors.New("entries on FSM response were empty") - } - - for i, logOp := range command.Operations { - if logOp.OpType == getOp { - fsmEntry := fsmar.EntrySlice[i] - - // this should always be true because the entries in the slice were created in the same order as - // the 
command operations. - if logOp.Key == fsmEntry.Key { - if len(fsmEntry.Value) > 0 { - logOp.Value = fsmEntry.Value - } - } else { - // this shouldn't happen - return errors.New("entries in FSM response were out of order") - } - } - } - return nil } @@ -1840,7 +1801,7 @@ func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) { return nil, errors.New("no seal access available") } - var eblob wrapping.BlobInfo + var eblob wrapping.EncryptedBlobInfo err := proto.Unmarshal(ct, &eblob) if err != nil { return nil, err diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go index b3fff3851cac2..dcb9624fe68c1 100644 --- a/physical/raft/raft_test.go +++ b/physical/raft/raft_test.go @@ -5,7 +5,6 @@ import ( "context" "crypto/md5" "encoding/base64" - "encoding/hex" "fmt" "io" "io/ioutil" @@ -303,65 +302,6 @@ func TestRaft_Backend_LargeValue(t *testing.T) { } } -// TestRaft_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the -// raft backend will populate values for any transactions that are Get operations. 
-func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) - - ctx := context.Background() - txns := make([]*physical.TxnEntry, 0) - - // Add some seed values to our FSM, and prepare our slice of transactions at the same time - for i := 0; i < 5; i++ { - key := fmt.Sprintf("foo/%d", i) - err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) - if err != nil { - t.Fatal(err) - } - - txns = append(txns, &physical.TxnEntry{ - Operation: physical.GetOperation, - Entry: &physical.Entry{ - Key: key, - }, - }) - } - - // Add some additional transactions, so we have a mix of operations - for i := 0; i < 10; i++ { - txnEntry := &physical.TxnEntry{ - Entry: &physical.Entry{ - Key: fmt.Sprintf("lol-%d", i), - }, - } - - if i%2 == 0 { - txnEntry.Operation = physical.PutOperation - txnEntry.Entry.Value = []byte("lol") - } else { - txnEntry.Operation = physical.DeleteOperation - } - - txns = append(txns, txnEntry) - } - - err := b.Transaction(ctx, txns) - if err != nil { - t.Fatal(err) - } - - // Check that our Get operations were populated with their values - for i, txn := range txns { - if txn.Operation == physical.GetOperation { - val := []byte(fmt.Sprintf("value-%d", i)) - if !bytes.Equal(val, txn.Entry.Value) { - t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) - } - } - } -} - func TestRaft_TransactionalBackend_LargeKey(t *testing.T) { b, dir := getRaft(t, true, true) defer os.RemoveAll(dir) diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go index 6b9e4c4b3659d..5fca8f6c3e81d 100644 --- a/physical/raft/types.pb.go +++ b/physical/raft/types.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: physical/raft/types.proto package raft diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index 09634da53c8dd..8447ed9853a96 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -371,5 +371,5 @@ func sanitizeTable(s string) string { if end > -1 { s = s[:end] } - return strings.ReplaceAll(s, `"`, `""`) + return strings.Replace(s, `"`, `""`, -1) } diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index 7aa4f8986dbd8..f3284fc270196 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -320,10 +320,9 @@ OUTER: // // - lock does not exist // - write the lock -// // - lock exists // - if key is empty or identity is the same or timestamp exceeds TTL -// - update the lock to self +// - update the lock to self func (l *Lock) writeLock() (bool, error) { // Keep track of whether the lock was written lockWritten := false diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go index 0e569c5a9fae9..c385285437d17 100644 --- a/physical/swift/swift_test.go +++ b/physical/swift/swift_test.go @@ -50,7 +50,7 @@ func TestSwiftBackend(t *testing.T) { err = cleaner.ContainerCreate(container, nil) if nil != err { - t.Fatalf("Unable to create test container %q: %v", container, err) + t.Fatalf("Unable to create test container '%s': %v", container, err) } defer func() { newObjects, err := cleaner.ObjectNamesAll(container, nil) diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go index 4ee0903642e08..0b92125807d3a 100644 --- a/plugins/database/cassandra/cassandra-database-plugin/main.go +++ b/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/cassandra" - 
"github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() { // Run instantiates a Cassandra object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(cassandra.New) + dbType, err := cassandra.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/hana/hana-database-plugin/main.go b/plugins/database/hana/hana-database-plugin/main.go index 2057c36c08d40..1285a8d5fca85 100644 --- a/plugins/database/hana/hana-database-plugin/main.go +++ b/plugins/database/hana/hana-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/hana" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() { // Run instantiates a HANA object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(hana.New) + dbType, err := hana.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index bca437c369a69..7462a35063775 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -10,22 +10,19 @@ import ( "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/helper/template" ) const ( - hanaTypeName = "hdb" - - defaultUserNameTemplate = `{{ printf "v_%s_%s_%s_%s" (.DisplayName | truncate 32) (.RoleName | truncate 20) (random 20) (unix_time) | truncate 127 | replace 
"-" "_" | uppercase }}` + hanaTypeName = "hdb" + maxIdentifierLength = 127 ) // HANA is an implementation of Database interface type HANA struct { *connutil.SQLConnectionProducer - - usernameProducer template.StringTemplate } var _ dbplugin.Database = (*HANA)(nil) @@ -60,25 +57,6 @@ func (h *HANA) Initialize(ctx context.Context, req dbplugin.InitializeRequest) ( return dbplugin.InitializeResponse{}, fmt.Errorf("error initializing db: %w", err) } - usernameTemplate, err := strutil.GetString(req.Config, "username_template") - if err != nil { - return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) - } - if usernameTemplate == "" { - usernameTemplate = defaultUserNameTemplate - } - - up, err := template.NewTemplate(template.Template(usernameTemplate)) - if err != nil { - return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) - } - h.usernameProducer = up - - _, err = h.usernameProducer.Generate(dbplugin.UsernameMetadata{}) - if err != nil { - return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) - } - return dbplugin.InitializeResponse{ Config: conf, }, nil @@ -116,13 +94,19 @@ func (h *HANA) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (respon } // Generate username - username, err := h.usernameProducer.Generate(req.UsernameConfig) + username, err := credsutil.GenerateUsername( + credsutil.DisplayName(req.UsernameConfig.DisplayName, 32), + credsutil.RoleName(req.UsernameConfig.RoleName, 20), + credsutil.MaxLength(maxIdentifierLength), + credsutil.Separator("_"), + credsutil.ToUpper(), + ) if err != nil { return dbplugin.NewUserResponse{}, err } // HANA does not allow hyphens in usernames, and highly prefers capital letters - username = strings.ReplaceAll(username, "-", "_") + username = strings.Replace(username, "-", "_", -1) username = strings.ToUpper(username) // If expiration is in the role SQL, HANA will deactivate the user when 
time is up, diff --git a/plugins/database/hana/hana_test.go b/plugins/database/hana/hana_test.go index 67c1088834897..b38e93b459cb1 100644 --- a/plugins/database/hana/hana_test.go +++ b/plugins/database/hana/hana_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" - "github.com/stretchr/testify/require" ) func TestHANA_Initialize(t *testing.T) { @@ -179,7 +178,7 @@ func TestHANA_UpdateUser(t *testing.T) { if err == nil { t.Fatalf("Able to login with new creds when expecting an issue") } else if test.expectedErrMsg != "" && !strings.Contains(err.Error(), test.expectedErrMsg) { - t.Fatalf("Expected error message to contain %q, received: %s", test.expectedErrMsg, err) + t.Fatalf("Expected error message to contain \"%s\", received: %s", test.expectedErrMsg, err) } } if !test.expectErrOnLogin && err != nil { @@ -289,97 +288,6 @@ func copyConfig(config map[string]interface{}) map[string]interface{} { return newConfig } -func TestHANA_DefaultUsernameTemplate(t *testing.T) { - if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { - t.SkipNow() - } - connURL := os.Getenv("HANA_URL") - - connectionDetails := map[string]interface{}{ - "connection_url": connURL, - } - - initReq := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - db := new() - dbtesting.AssertInitialize(t, db, initReq) - - usernameConfig := dbplugin.UsernameMetadata{ - DisplayName: "test", - RoleName: "test", - } - - const password = "SuperSecurePa55w0rd!" 
- resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ - UsernameConfig: usernameConfig, - Password: password, - Statements: dbplugin.Statements{ - Commands: []string{testHANARole}, - }, - Expiration: time.Now().Add(5 * time.Minute), - }) - username := resp.Username - - if resp.Username == "" { - t.Fatalf("Missing username") - } - - testCredsExist(t, connURL, username, password) - - require.Regexp(t, `^V_TEST_TEST_[A-Z0-9]{20}_[0-9]{10}$`, resp.Username) - - defer dbtesting.AssertClose(t, db) -} - -func TestHANA_CustomUsernameTemplate(t *testing.T) { - if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { - t.SkipNow() - } - connURL := os.Getenv("HANA_URL") - - connectionDetails := map[string]interface{}{ - "connection_url": connURL, - "username_template": "{{.DisplayName}}_{{random 10}}", - } - - initReq := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - db := new() - dbtesting.AssertInitialize(t, db, initReq) - - usernameConfig := dbplugin.UsernameMetadata{ - DisplayName: "test", - RoleName: "test", - } - - const password = "SuperSecurePa55w0rd!" 
- resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ - UsernameConfig: usernameConfig, - Password: password, - Statements: dbplugin.Statements{ - Commands: []string{testHANARole}, - }, - Expiration: time.Now().Add(5 * time.Minute), - }) - username := resp.Username - - if resp.Username == "" { - t.Fatalf("Missing username") - } - - testCredsExist(t, connURL, username, password) - - require.Regexp(t, `^TEST_[A-Z0-9]{10}$`, resp.Username) - - defer dbtesting.AssertClose(t, db) -} - const testHANARole = ` CREATE USER {{name}} PASSWORD "{{password}}" NO FORCE_FIRST_PASSWORD_CHANGE VALID UNTIL '{{expiration}}';` diff --git a/plugins/database/influxdb/influxdb-database-plugin/main.go b/plugins/database/influxdb/influxdb-database-plugin/main.go index c8f6c5fa1e3fd..47284f725d0a8 100644 --- a/plugins/database/influxdb/influxdb-database-plugin/main.go +++ b/plugins/database/influxdb/influxdb-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/influxdb" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() { // Run instantiates a Influxdb object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(influxdb.New) + dbType, err := influxdb.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/mongodb/mongodb-database-plugin/main.go b/plugins/database/mongodb/mongodb-database-plugin/main.go index 30dd5fdd7cff7..3e78ca33ffd04 100644 --- a/plugins/database/mongodb/mongodb-database-plugin/main.go +++ b/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/mongodb" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() 
{ // Run instantiates a MongoDB object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(mongodb.New) + dbType, err := mongodb.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go index 37a81a660012f..d8b680cbc4934 100644 --- a/plugins/database/mssql/mssql-database-plugin/main.go +++ b/plugins/database/mssql/mssql-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/mssql" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() { // Run instantiates a MSSQL object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(mssql.New) + dbType, err := mssql.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/mysql/mysql-database-plugin/main.go b/plugins/database/mysql/mysql-database-plugin/main.go index 6b1505aff194c..ae75b64aadaaa 100644 --- a/plugins/database/mysql/mysql-database-plugin/main.go +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/mysql" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -20,8 +20,12 @@ func main() { func Run() error { var f func() (interface{}, error) f = mysql.New(mysql.DefaultUserNameTemplate) + dbType, err := f() + if err != nil { + return err + } - dbplugin.ServeMultiplex(f) + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go index db47c71dd3108..1a992a30f1a55 100644 --- a/plugins/database/mysql/mysql.go +++ 
b/plugins/database/mysql/mysql.go @@ -16,7 +16,7 @@ import ( const ( defaultMysqlRevocationStmts = ` - REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; + REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; DROP USER '{{name}}'@'%' ` @@ -174,8 +174,8 @@ func (m *MySQL) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) // This is not a prepared statement because not all commands are supported // 1295: This command is not supported in the prepared statement protocol yet // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ - query = strings.ReplaceAll(query, "{{name}}", req.Username) - query = strings.ReplaceAll(query, "{{username}}", req.Username) + query = strings.Replace(query, "{{name}}", req.Username, -1) + query = strings.Replace(query, "{{username}}", req.Username, -1) _, err = tx.ExecContext(ctx, query) if err != nil { return dbplugin.DeleteUserResponse{}, err diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go index 75b5fd9babb2a..3d2e14cd9aab3 100644 --- a/plugins/database/postgresql/postgresql-database-plugin/main.go +++ b/plugins/database/postgresql/postgresql-database-plugin/main.go @@ -5,7 +5,7 @@ import ( "os" "github.com/hashicorp/vault/plugins/database/postgresql" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) func main() { @@ -18,7 +18,12 @@ func main() { // Run instantiates a PostgreSQL object, and runs the RPC server for the plugin func Run() error { - dbplugin.ServeMultiplex(postgresql.New) + dbType, err := postgresql.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) return nil } diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index c76558350586d..359e7ca886883 100644 --- a/plugins/database/postgresql/postgresql.go +++ 
b/plugins/database/postgresql/postgresql.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/dbtxn" "github.com/hashicorp/vault/sdk/helper/template" - "github.com/hashicorp/vault/sdk/logical" _ "github.com/jackc/pgx/v4/stdlib" ) @@ -33,8 +32,7 @@ ALTER ROLE "{{username}}" WITH PASSWORD '{{password}}'; ) var ( - _ dbplugin.Database = (*PostgreSQL)(nil) - _ logical.PluginVersioner = (*PostgreSQL)(nil) + _ dbplugin.Database = &PostgreSQL{} // postgresEndStatement is basically the word "END" but // surrounded by a word boundary to differentiate it from @@ -48,9 +46,6 @@ var ( // singleQuotedPhrases finds substrings like 'hello' // and pulls them out with the quotes included. singleQuotedPhrases = regexp.MustCompile(`('.*?')`) - - // ReportedVersion is used to report a specific version to Vault. - ReportedVersion = "" ) func New() (interface{}, error) { @@ -474,10 +469,6 @@ func (p *PostgreSQL) secretValues() map[string]string { } } -func (p *PostgreSQL) PluginVersion() logical.PluginVersion { - return logical.PluginVersion{Version: ReportedVersion} -} - // containsMultilineStatement is a best effort to determine whether // a particular statement is multiline, and therefore should not be // split upon semicolons. If it's unsure, it defaults to false. @@ -491,7 +482,7 @@ func containsMultilineStatement(stmt string) bool { } stmtWithoutLiterals := stmt for _, literal := range literals { - stmtWithoutLiterals = strings.ReplaceAll(stmt, literal, "") + stmtWithoutLiterals = strings.Replace(stmt, literal, "", -1) } // Now look for the word "END" specifically. 
This will miss any // representations of END that aren't surrounded by spaces, but diff --git a/plugins/database/redshift/redshift-database-plugin/main.go b/plugins/database/redshift/redshift-database-plugin/main.go index 8d2f796eeab3f..35a4642fa7661 100644 --- a/plugins/database/redshift/redshift-database-plugin/main.go +++ b/plugins/database/redshift/redshift-database-plugin/main.go @@ -4,20 +4,30 @@ import ( "log" "os" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/plugins/database/redshift" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/dbplugin" ) func main() { - if err := Run(); err != nil { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + if err := Run(apiClientMeta.GetTLSConfig()); err != nil { log.Println(err) os.Exit(1) } } // Run instantiates a RedShift object, and runs the RPC server for the plugin -func Run() error { - dbplugin.ServeMultiplex(redshift.New) +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := redshift.New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig)) return nil } diff --git a/scripts/build.sh b/scripts/build.sh index b2e1302cb4aec..14261537a52f7 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -23,6 +23,21 @@ GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)" BUILD_DATE=$("$SOURCE_DIR"/build_date.sh) +# If its dev mode, only build for ourself +if [ "${VAULT_DEV_BUILD}x" != "x" ] && [ "${XC_OSARCH}x" == "x" ]; then + XC_OS=$(${GO_CMD} env GOOS) + XC_ARCH=$(${GO_CMD} env GOARCH) + XC_OSARCH=$(${GO_CMD} env GOOS)/$(${GO_CMD} env GOARCH) +elif [ "${XC_OSARCH}x" != "x" ]; then + IFS='/' read -ra SPLITXC <<< "${XC_OSARCH}" + DEV_PLATFORM="./pkg/${SPLITXC[0]}_${SPLITXC[1]}" +fi + +# Determine the arch/os combos we're building for +XC_ARCH=${XC_ARCH:-"386 amd64"} +XC_OS=${XC_OS:-linux darwin 
windows freebsd openbsd netbsd solaris} +XC_OSARCH=${XC_OSARCH:-"linux/386 linux/amd64 linux/arm linux/arm64 darwin/386 darwin/amd64 darwin/arm64 windows/386 windows/amd64 freebsd/386 freebsd/amd64 freebsd/arm openbsd/386 openbsd/amd64 openbsd/arm netbsd/386 netbsd/amd64 solaris/amd64"} + GOPATH=${GOPATH:-$(${GO_CMD} env GOPATH)} case $(uname) in CYGWIN*) @@ -37,12 +52,16 @@ rm -rf pkg/* mkdir -p bin/ # Build! +# If GOX_PARALLEL_BUILDS is set, it will be used to add a "-parallel=${GOX_PARALLEL_BUILDS}" gox parameter echo "==> Building..." -${GO_CMD} build \ +gox \ + -osarch="${XC_OSARCH}" \ -gcflags "${GCFLAGS}" \ - -ldflags "${LD_FLAGS} -X github.com/hashicorp/vault/sdk/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}' -X github.com/hashicorp/vault/sdk/version.BuildDate=${BUILD_DATE}" \ - -o "bin/vault" \ - -tags "${BUILD_TAGS}" \ + -ldflags "${LD_FLAGS}-X github.com/hashicorp/vault/sdk/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}' -X github.com/hashicorp/vault/sdk/version.BuildDate=${BUILD_DATE}" \ + -output "pkg/{{.OS}}_{{.Arch}}/vault" \ + ${GOX_PARALLEL_BUILDS+-parallel="${GOX_PARALLEL_BUILDS}"} \ + -tags="${BUILD_TAGS}" \ + -gocmd="${GO_CMD}" \ . # Move all the compiled things to the $GOPATH/bin @@ -50,8 +69,26 @@ OLDIFS=$IFS IFS=: MAIN_GOPATH=($GOPATH) IFS=$OLDIFS -rm -f ${MAIN_GOPATH}/bin/vault -cp bin/vault ${MAIN_GOPATH}/bin/ +# Copy our OS/Arch to the bin/ directory +DEV_PLATFORM=${DEV_PLATFORM:-"./pkg/$(${GO_CMD} env GOOS)_$(${GO_CMD} env GOARCH)"} +for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do + cp ${F} bin/ + rm -f ${MAIN_GOPATH}/bin/vault + cp ${F} ${MAIN_GOPATH}/bin/ +done + +if [ "${VAULT_DEV_BUILD}x" = "x" ]; then + # Zip and copy to the dist dir + echo "==> Packaging..." + for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do + OSARCH=$(basename ${PLATFORM}) + echo "--> ${OSARCH}" + + pushd $PLATFORM >/dev/null 2>&1 + zip ../${OSARCH}.zip ./* + popd >/dev/null 2>&1 + done +fi # Done! 
echo diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 504399c3ff386..220982a6f3976 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -31,6 +31,7 @@ ENV GOROOT /goroot ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH RUN go get golang.org/x/tools/cmd/goimports +RUN go get github.com/mitchellh/gox RUN mkdir -p /gopath/src/github.com/hashicorp/vault WORKDIR /gopath/src/github.com/hashicorp/vault diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 7817c02a52bf0..1ee94cf09652c 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,6 +1,7 @@ # Multi-stage builder to avoid polluting users environment with wrong -# architecture binaries. -ARG VERSION=1.19.1 +# architecture binaries. Since this binary is used in an alpine container, +# we're explicitly compiling for 'linux/amd64' +ARG VERSION=1.17.13 FROM golang:${VERSION} AS builder @@ -11,7 +12,7 @@ WORKDIR /go/src/github.com/hashicorp/vault COPY . . RUN make bootstrap \ - && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS}" VAULT_DEV_BUILD=1 sh -c "'./scripts/build.sh'" + && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS}" VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'" # Docker Image @@ -26,7 +27,7 @@ RUN addgroup vault && \ RUN set -eux; \ apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata -COPY --from=builder /go/src/github.com/hashicorp/vault/bin/vault /bin/vault +COPY --from=builder /go/bin/vault /bin/vault # /vault/logs is made available to use as a location to store audit logs, if # desired; /vault/file is made available to use as a location with the file diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui index 371a576fc9f4e..9bfbfb38110c1 100644 --- a/scripts/docker/Dockerfile.ui +++ b/scripts/docker/Dockerfile.ui @@ -1,8 +1,9 @@ # Multi-stage builder to avoid polluting users environment with wrong -# architecture binaries. This file only currently works for linux/amd64. 
+# architecture binaries. Since this binary is used in an alpine container, +# we're explicitly compiling for 'linux/amd64' FROM debian:buster AS builder -ARG VERSION=1.19.1 +ARG VERSION=1.17.13 ARG CGO_ENABLED=0 ARG BUILD_TAGS ENV JOBS=2 @@ -37,7 +38,7 @@ ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH WORKDIR /go/src/github.com/hashicorp/vault COPY . . RUN make bootstrap static-dist \ - && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS} ui" VAULT_DEV_BUILD=1 GOOS=linux GOARCH=amd64 sh -c "'./scripts/build.sh'" + && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS} ui" VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'" # Docker Image @@ -52,7 +53,7 @@ RUN addgroup vault && \ RUN set -eux; \ apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata -COPY --from=builder /go/src/github.com/hashicorp/vault/bin/vault /bin/vault +COPY --from=builder /go/bin/vault /bin/vault # /vault/logs is made available to use as a location to store audit logs, if # desired; /vault/file is made available to use as a location with the file diff --git a/scripts/protocversioncheck.sh b/scripts/protocversioncheck.sh deleted file mode 100755 index 4b081674806b9..0000000000000 --- a/scripts/protocversioncheck.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -PROTOC_CMD=${PROTOC_CMD:-protoc} -PROTOC_VERSION_EXACT="$1" -echo "==> Checking that protoc is at version $1..." - -PROTOC_VERSION=$($PROTOC_CMD --version | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+') - -if [ "$PROTOC_VERSION" == "$PROTOC_VERSION_EXACT" ]; then - echo "Using protoc version $PROTOC_VERSION" -else - echo "protoc should be at $PROTOC_VERSION_EXACT; found $PROTOC_VERSION." - echo "If your version is higher than the version this script is looking for, updating the Makefile with the newer version." 
- exit 1 -fi diff --git a/scripts/windows/build.bat b/scripts/windows/build.bat index 1452911e514a7..3b4ee3a4c616b 100644 --- a/scripts/windows/build.bat +++ b/scripts/windows/build.bat @@ -62,9 +62,11 @@ del /f "%_GO_ENV_TMP_FILE%" 2>nul :build REM Build! echo ==^> Building... -go build^ +gox^ + -os="%_XC_OS%"^ + -arch="%_XC_ARCH%"^ -ldflags "-X github.com/hashicorp/vault/sdk/version.GitCommit=%_GIT_COMMIT%%_GIT_DIRTY% -X github.com/hashicorp/vault/sdk/version.BuildDate=%_BUILD_DATE%"^ - -o "bin/vault.exe"^ + -output "pkg/{{.OS}}_{{.Arch}}/vault"^ . if %ERRORLEVEL% equ 1 set %_EXITCODE%=1 @@ -85,6 +87,14 @@ go env GOOS >"%_GO_ENV_TMP_FILE%" set /p _GOOS=<"%_GO_ENV_TMP_FILE%" del /f "%_GO_ENV_TMP_FILE%" 2>nul +REM Copy our OS/Arch to the bin/ directory +set _DEV_PLATFORM=pkg\%_GOOS%_%_GOARCH% + +for /r %%f in (%_DEV_PLATFORM%) do ( + copy /b /y %%f bin\ >nul + copy /b /y %%f %_GOPATH%\bin\ >nul +) + REM TODO(ceh): package dist REM Done! diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index c916c861e27fa..7c9e08a9b03eb 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/database/dbplugin/database.proto package dbplugin diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go index 29c806113844b..e7cb0a2f5af12 100644 --- a/sdk/database/dbplugin/databasemiddleware.go +++ b/sdk/database/dbplugin/databasemiddleware.go @@ -323,11 +323,11 @@ func (mw *DatabaseErrorSanitizerMiddleware) sanitize(err error) error { // error without changing the actual error message s, ok := status.FromError(err) if ok { - err = status.Error(s.Code(), strings.ReplaceAll(s.Message(), k, v.(string))) + err = status.Error(s.Code(), strings.Replace(s.Message(), k, v.(string), -1)) continue } - err = errors.New(strings.ReplaceAll(err.Error(), k, v.(string))) + err = errors.New(strings.Replace(err.Error(), k, v.(string), -1)) } } return err diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 29f2f1f898b89..6788e3379ddce 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -71,15 +71,9 @@ type Database interface { // PluginFactory is used to build plugin database types. It wraps the database // object in a logging and metrics middleware. -func PluginFactory(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { - return PluginFactoryVersion(ctx, pluginName, "", sys, logger) -} - -// PluginFactory is used to build plugin database types with a version specified. -// It wraps the database object in a logging and metrics middleware. 
-func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { +func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { // Look for plugin in the plugin catalog - pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, consts.PluginTypeDatabase, pluginVersion) + pluginRunner, err := sys.LookupPlugin(ctx, pluginName, consts.PluginTypeDatabase) if err != nil { return nil, err } diff --git a/sdk/database/dbplugin/v5/grpc_client.go b/sdk/database/dbplugin/v5/grpc_client.go index cfddfcd578efa..e71c7979484c4 100644 --- a/sdk/database/dbplugin/v5/grpc_client.go +++ b/sdk/database/dbplugin/v5/grpc_client.go @@ -9,28 +9,17 @@ import ( "github.com/golang/protobuf/ptypes" "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" ) var ( - _ Database = gRPCClient{} - _ logical.PluginVersioner = gRPCClient{} + _ Database = gRPCClient{} ErrPluginShutdown = errors.New("plugin shutdown") ) type gRPCClient struct { - client proto.DatabaseClient - versionClient logical.PluginVersionClient - doneCtx context.Context -} - -func (c gRPCClient) PluginVersion() logical.PluginVersion { - version, _ := c.versionClient.Version(context.Background(), &logical.Empty{}) - if version != nil { - return logical.PluginVersion{Version: version.PluginVersion} - } - return logical.EmptyPluginVersion + client proto.DatabaseClient + doneCtx context.Context } func (c gRPCClient) Initialize(ctx context.Context, req InitializeRequest) (InitializeResponse, error) { diff --git a/sdk/database/dbplugin/v5/grpc_database_plugin.go b/sdk/database/dbplugin/v5/grpc_database_plugin.go index 441030df93e0b..92ed2dcc66a11 100644 --- a/sdk/database/dbplugin/v5/grpc_database_plugin.go +++ b/sdk/database/dbplugin/v5/grpc_database_plugin.go @@ -6,7 +6,6 @@ 
import ( "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc" ) @@ -55,15 +54,13 @@ func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) err } proto.RegisterDatabaseServer(s, &server) - logical.RegisterPluginVersionServer(s, &server) return nil } func (GRPCDatabasePlugin) GRPCClient(doneCtx context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { client := gRPCClient{ - client: proto.NewDatabaseClient(c), - versionClient: logical.NewPluginVersionClient(c), - doneCtx: doneCtx, + client: proto.NewDatabaseClient(c), + doneCtx: doneCtx, } return client, nil } diff --git a/sdk/database/dbplugin/v5/grpc_server.go b/sdk/database/dbplugin/v5/grpc_server.go index ce3be1efb7c68..4d29a5a62f6ab 100644 --- a/sdk/database/dbplugin/v5/grpc_server.go +++ b/sdk/database/dbplugin/v5/grpc_server.go @@ -2,17 +2,15 @@ package dbplugin import ( "context" - "errors" "fmt" "sync" "time" "github.com/golang/protobuf/ptypes" "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" - "github.com/hashicorp/vault/sdk/helper/base62" "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -20,7 +18,6 @@ var _ proto.DatabaseServer = &gRPCServer{} type gRPCServer struct { proto.UnimplementedDatabaseServer - logical.UnimplementedPluginVersionServer // holds the non-multiplexed Database // when this is set the plugin does not support multiplexing @@ -33,6 +30,25 @@ type gRPCServer struct { sync.RWMutex } +func getMultiplexIDFromContext(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", fmt.Errorf("missing plugin multiplexing metadata") + } + + multiplexIDs := md[pluginutil.MultiplexingCtxKey] + if 
len(multiplexIDs) != 1 { + return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) + } + + multiplexID := multiplexIDs[0] + if multiplexID == "" { + return "", fmt.Errorf("empty multiplex ID in metadata") + } + + return multiplexID, nil +} + func (g *gRPCServer) getOrCreateDatabase(ctx context.Context) (Database, error) { g.Lock() defer g.Unlock() @@ -41,18 +57,15 @@ func (g *gRPCServer) getOrCreateDatabase(ctx context.Context) (Database, error) return g.singleImpl, nil } - id, err := pluginutil.GetMultiplexIDFromContext(ctx) + id, err := getMultiplexIDFromContext(ctx) if err != nil { return nil, err } + if db, ok := g.instances[id]; ok { return db, nil } - return g.createDatabase(id) -} -// must hold the g.Lock() to call this function -func (g *gRPCServer) createDatabase(id string) (Database, error) { db, err := g.factoryFunc() if err != nil { return nil, err @@ -70,7 +83,7 @@ func (g *gRPCServer) getDatabaseInternal(ctx context.Context) (Database, error) return g.singleImpl, nil } - id, err := pluginutil.GetMultiplexIDFromContext(ctx) + id, err := getMultiplexIDFromContext(ctx) if err != nil { return nil, err } @@ -299,7 +312,7 @@ func (g *gRPCServer) Close(ctx context.Context, _ *proto.Empty) (*proto.Empty, e if g.singleImpl == nil { // only cleanup instances map when multiplexing is supported - id, err := pluginutil.GetMultiplexIDFromContext(ctx) + id, err := getMultiplexIDFromContext(ctx) if err != nil { return nil, err } @@ -309,42 +322,6 @@ func (g *gRPCServer) Close(ctx context.Context, _ *proto.Empty) (*proto.Empty, e return &proto.Empty{}, nil } -// getOrForceCreateDatabase will create a database even if the multiplexing ID is not present -func (g *gRPCServer) getOrForceCreateDatabase(ctx context.Context) (Database, error) { - impl, err := g.getOrCreateDatabase(ctx) - if errors.Is(err, pluginutil.ErrNoMultiplexingIDFound) { - // if this is called without a multiplexing context, like from the plugin catalog directly, - // 
then we won't have a database ID, so let's generate a new database instance - id, err := base62.Random(10) - if err != nil { - return nil, err - } - - g.Lock() - defer g.Unlock() - impl, err = g.createDatabase(id) - if err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return impl, nil -} - -// Version forwards the version request to the underlying Database implementation. -func (g *gRPCServer) Version(ctx context.Context, _ *logical.Empty) (*logical.VersionReply, error) { - impl, err := g.getOrForceCreateDatabase(ctx) - if err != nil { - return nil, err - } - - if versioner, ok := impl.(logical.PluginVersioner); ok { - return &logical.VersionReply{PluginVersion: versioner.PluginVersion().Version}, nil - } - return &logical.VersionReply{}, nil -} - func getStatementsFromProto(protoStmts *proto.Statements) (statements Statements) { if protoStmts == nil { return statements diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go index 7399bf55789bc..4f45e54bb74f8 100644 --- a/sdk/database/dbplugin/v5/grpc_server_test.go +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/logical" "google.golang.org/protobuf/types/known/structpb" "github.com/golang/protobuf/ptypes" @@ -582,49 +581,51 @@ func TestGRPCServer_Close(t *testing.T) { } } -func TestGRPCServer_Version(t *testing.T) { +func TestGetMultiplexIDFromContext(t *testing.T) { type testCase struct { - db Database + ctx context.Context expectedResp string - expectErr bool - expectCode codes.Code + expectedErr error } tests := map[string]testCase{ - "backend that does not implement version": { - db: fakeDatabase{}, + "missing plugin multiplexing metadata": { + ctx: context.Background(), expectedResp: "", - expectErr: false, - expectCode: codes.OK, + expectedErr: fmt.Errorf("missing plugin multiplexing metadata"), }, - "backend with version": { - db: 
fakeDatabaseWithVersion{ - version: "v123", - }, - expectedResp: "v123", - expectErr: false, - expectCode: codes.OK, + "unexpected number of IDs in metadata": { + ctx: idCtx(t, "12345", "67891"), + expectedResp: "", + expectedErr: fmt.Errorf("unexpected number of IDs in metadata: (2)"), + }, + "empty multiplex ID in metadata": { + ctx: idCtx(t, ""), + expectedResp: "", + expectedErr: fmt.Errorf("empty multiplex ID in metadata"), + }, + "happy path, id is returned from metadata": { + ctx: idCtx(t, "12345"), + expectedResp: "12345", + expectedErr: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - idCtx, g := testGrpcServer(t, test.db) - resp, err := g.Version(idCtx, &logical.Empty{}) + resp, err := getMultiplexIDFromContext(test.ctx) - if test.expectErr && err == nil { + if test.expectedErr != nil && test.expectedErr.Error() != "" && err == nil { t.Fatalf("err expected, got nil") - } - if !test.expectErr && err != nil { - t.Fatalf("no error expected, got: %s", err) + } else if !reflect.DeepEqual(err, test.expectedErr) { + t.Fatalf("Actual error: %#v\nExpected error: %#v", err, test.expectedErr) } - actualCode := status.Code(err) - if actualCode != test.expectCode { - t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + if test.expectedErr != nil && test.expectedErr.Error() == "" && err != nil { + t.Fatalf("no error expected, got: %s", err) } - if !reflect.DeepEqual(resp.PluginVersion, test.expectedResp) { + if !reflect.DeepEqual(resp, test.expectedResp) { t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) } }) @@ -797,40 +798,3 @@ func (f *recordingDatabase) Close() error { } return f.next.Close() } - -type fakeDatabaseWithVersion struct { - version string -} - -func (e fakeDatabaseWithVersion) PluginVersion() logical.PluginVersion { - return logical.PluginVersion{Version: e.version} -} - -func (e fakeDatabaseWithVersion) Initialize(_ context.Context, _ InitializeRequest) 
(InitializeResponse, error) { - return InitializeResponse{}, nil -} - -func (e fakeDatabaseWithVersion) NewUser(_ context.Context, _ NewUserRequest) (NewUserResponse, error) { - return NewUserResponse{}, nil -} - -func (e fakeDatabaseWithVersion) UpdateUser(_ context.Context, _ UpdateUserRequest) (UpdateUserResponse, error) { - return UpdateUserResponse{}, nil -} - -func (e fakeDatabaseWithVersion) DeleteUser(_ context.Context, _ DeleteUserRequest) (DeleteUserResponse, error) { - return DeleteUserResponse{}, nil -} - -func (e fakeDatabaseWithVersion) Type() (string, error) { - return "", nil -} - -func (e fakeDatabaseWithVersion) Close() error { - return nil -} - -var ( - _ Database = (*fakeDatabaseWithVersion)(nil) - _ logical.PluginVersioner = (*fakeDatabaseWithVersion)(nil) -) diff --git a/sdk/database/dbplugin/v5/middleware.go b/sdk/database/dbplugin/v5/middleware.go index 240d64e6915ea..20b4212060299 100644 --- a/sdk/database/dbplugin/v5/middleware.go +++ b/sdk/database/dbplugin/v5/middleware.go @@ -7,10 +7,9 @@ import ( "strings" "time" - "github.com/armon/go-metrics" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc/status" ) @@ -18,10 +17,7 @@ import ( // Tracing Middleware // /////////////////////////////////////////////////// -var ( - _ Database = databaseTracingMiddleware{} - _ logical.PluginVersioner = databaseTracingMiddleware{} -) +var _ Database = databaseTracingMiddleware{} // databaseTracingMiddleware wraps a implementation of Database and executes // trace logging on function call. 
@@ -30,21 +26,6 @@ type databaseTracingMiddleware struct { logger log.Logger } -func (mw databaseTracingMiddleware) PluginVersion() (resp logical.PluginVersion) { - defer func(then time.Time) { - mw.logger.Trace("version", - "status", "finished", - "version", resp, - "took", time.Since(then)) - }(time.Now()) - - mw.logger.Trace("version", "status", "started") - if versioner, ok := mw.next.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - func (mw databaseTracingMiddleware) Initialize(ctx context.Context, req InitializeRequest) (resp InitializeResponse, err error) { defer func(then time.Time) { mw.logger.Trace("initialize", @@ -117,10 +98,7 @@ func (mw databaseTracingMiddleware) Close() (err error) { // Metrics Middleware Domain // /////////////////////////////////////////////////// -var ( - _ Database = databaseMetricsMiddleware{} - _ logical.PluginVersioner = databaseMetricsMiddleware{} -) +var _ Database = databaseMetricsMiddleware{} // databaseMetricsMiddleware wraps an implementation of Databases and on // function call logs metrics about this instance. 
@@ -130,21 +108,6 @@ type databaseMetricsMiddleware struct { typeStr string } -func (mw databaseMetricsMiddleware) PluginVersion() logical.PluginVersion { - defer func(now time.Time) { - metrics.MeasureSince([]string{"database", "PluginVersion"}, now) - metrics.MeasureSince([]string{"database", mw.typeStr, "PluginVersion"}, now) - }(time.Now()) - - metrics.IncrCounter([]string{"database", "PluginVersion"}, 1) - metrics.IncrCounter([]string{"database", mw.typeStr, "PluginVersion"}, 1) - - if versioner, ok := mw.next.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - func (mw databaseMetricsMiddleware) Initialize(ctx context.Context, req InitializeRequest) (resp InitializeResponse, err error) { defer func(now time.Time) { metrics.MeasureSince([]string{"database", "Initialize"}, now) @@ -233,10 +196,7 @@ func (mw databaseMetricsMiddleware) Close() (err error) { // Error Sanitizer Middleware Domain // /////////////////////////////////////////////////// -var ( - _ Database = (*DatabaseErrorSanitizerMiddleware)(nil) - _ logical.PluginVersioner = (*DatabaseErrorSanitizerMiddleware)(nil) -) +var _ Database = DatabaseErrorSanitizerMiddleware{} // DatabaseErrorSanitizerMiddleware wraps an implementation of Databases and // sanitizes returned error messages @@ -283,13 +243,6 @@ func (mw DatabaseErrorSanitizerMiddleware) Close() (err error) { return mw.sanitize(mw.next.Close()) } -func (mw DatabaseErrorSanitizerMiddleware) PluginVersion() logical.PluginVersion { - if versioner, ok := mw.next.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - // sanitize errors by removing any sensitive strings within their messages. This uses // the secretsFn to determine what fields should be sanitized. 
func (mw DatabaseErrorSanitizerMiddleware) sanitize(err error) error { @@ -311,11 +264,11 @@ func (mw DatabaseErrorSanitizerMiddleware) sanitize(err error) error { // error while changing the actual error message s, ok := status.FromError(err) if ok { - err = status.Error(s.Code(), strings.ReplaceAll(s.Message(), find, replace)) + err = status.Error(s.Code(), strings.Replace(s.Message(), find, replace, -1)) continue } - err = errors.New(strings.ReplaceAll(err.Error(), find, replace)) + err = errors.New(strings.Replace(err.Error(), find, replace, -1)) } return err } diff --git a/sdk/database/dbplugin/v5/plugin_client.go b/sdk/database/dbplugin/v5/plugin_client.go index caea00a8fdaf1..4ef24cb8e2420 100644 --- a/sdk/database/dbplugin/v5/plugin_client.go +++ b/sdk/database/dbplugin/v5/plugin_client.go @@ -4,26 +4,16 @@ import ( "context" "errors" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" ) -var _ logical.PluginVersioner = (*DatabasePluginClient)(nil) - type DatabasePluginClient struct { client pluginutil.PluginClient Database } -func (dc *DatabasePluginClient) PluginVersion() logical.PluginVersion { - if versioner, ok := dc.Database.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - // This wraps the Close call and ensures we both close the database connection // and kill the plugin. 
func (dc *DatabasePluginClient) Close() error { @@ -65,7 +55,6 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, config plug // This is an abstraction leak from go-plugin but it is necessary in // order to enable multiplexing on multiplexed plugins c.client = proto.NewDatabaseClient(pluginClient.Conn()) - c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn()) db = c default: diff --git a/sdk/database/dbplugin/v5/plugin_client_test.go b/sdk/database/dbplugin/v5/plugin_client_test.go index f41ecf8ef4ab0..0ff8309f1092e 100644 --- a/sdk/database/dbplugin/v5/plugin_client_test.go +++ b/sdk/database/dbplugin/v5/plugin_client_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/helper/wrapping" - "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/mock" "google.golang.org/grpc" ) @@ -39,7 +38,7 @@ func TestNewPluginClient(t *testing.T) { dispenseResp: gRPCClient{client: fakeClient{}}, dispenseErr: nil, }, - Database: gRPCClient{client: proto.NewDatabaseClient(nil), versionClient: logical.NewPluginVersionClient(nil), doneCtx: context.Context(nil)}, + Database: gRPCClient{proto.NewDatabaseClient(nil), context.Context(nil)}, }, expectedErr: nil, }, @@ -113,10 +112,6 @@ func (f *fakePluginClient) Conn() grpc.ClientConnInterface { return nil } -func (f *fakePluginClient) Reload() error { - return nil -} - func (f *fakePluginClient) Dispense(name string) (interface{}, error) { return f.dispenseResp, f.dispenseErr } diff --git a/sdk/database/dbplugin/v5/plugin_factory.go b/sdk/database/dbplugin/v5/plugin_factory.go index f68cc5621aa41..b87dc3a75a605 100644 --- a/sdk/database/dbplugin/v5/plugin_factory.go +++ b/sdk/database/dbplugin/v5/plugin_factory.go @@ -13,14 +13,8 @@ import ( // PluginFactory is used to build plugin database types. It wraps the database // object in a logging and metrics middleware. 
func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { - return PluginFactoryVersion(ctx, pluginName, "", sys, logger) -} - -// PluginFactoryVersion is used to build plugin database types with a version specified. -// It wraps the database object in a logging and metrics middleware. -func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { // Look for plugin in the plugin catalog - pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, consts.PluginTypeDatabase, pluginVersion) + pluginRunner, err := sys.LookupPlugin(ctx, pluginName, consts.PluginTypeDatabase) if err != nil { return nil, err } @@ -49,7 +43,6 @@ func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion config := pluginutil.PluginClientConfig{ Name: pluginName, PluginType: consts.PluginTypeDatabase, - Version: pluginVersion, PluginSets: PluginSets, HandshakeConfig: HandshakeConfig, Logger: namedLogger, diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go index f814c60bcec1d..a5f52dab999d5 100644 --- a/sdk/database/dbplugin/v5/proto/database.pb.go +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -22,9 +22,9 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// /////////////// +///////////////// // Initialize() -// /////////////// +///////////////// type InitializeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -324,9 +324,9 @@ func (x *NewUserResponse) GetUsername() string { return "" } -// /////////////// +///////////////// // UpdateUser() -// /////////////// +///////////////// type UpdateUserRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -609,9 +609,9 @@ func (*UpdateUserResponse) Descriptor() ([]byte, []int) { return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{9} } -// /////////////// +///////////////// // DeleteUser() -// /////////////// +///////////////// type DeleteUserRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -705,9 +705,9 @@ func (*DeleteUserResponse) Descriptor() ([]byte, []int) { return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{11} } -// /////////////// +///////////////// // Type() -// /////////////// +///////////////// type TypeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -755,9 +755,9 @@ func (x *TypeResponse) GetType() string { return "" } -// /////////////// +///////////////// // General purpose -// /////////////// +///////////////// type Statements struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/sdk/database/dbplugin/v5/proto/database.proto b/sdk/database/dbplugin/v5/proto/database.proto index b4959f709e1f9..c5d4c5562576b 100644 --- a/sdk/database/dbplugin/v5/proto/database.proto +++ b/sdk/database/dbplugin/v5/proto/database.proto @@ -101,4 +101,4 @@ service Database { rpc DeleteUser(DeleteUserRequest) returns 
(DeleteUserResponse); rpc Type(Empty) returns (TypeResponse); rpc Close(Empty) returns (Empty); -} \ No newline at end of file +} diff --git a/sdk/database/helper/dbutil/dbutil.go b/sdk/database/helper/dbutil/dbutil.go index 19198bcfdddd7..84b98d1889679 100644 --- a/sdk/database/helper/dbutil/dbutil.go +++ b/sdk/database/helper/dbutil/dbutil.go @@ -18,7 +18,7 @@ var ( // Query templates a query for us. func QueryHelper(tpl string, data map[string]string) string { for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl diff --git a/sdk/database/helper/dbutil/quoteidentifier.go b/sdk/database/helper/dbutil/quoteidentifier.go index 92c6fda8dcaa4..dc63abbe67fd8 100644 --- a/sdk/database/helper/dbutil/quoteidentifier.go +++ b/sdk/database/helper/dbutil/quoteidentifier.go @@ -28,10 +28,10 @@ import "strings" // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be // used as part of an SQL statement. For example: // -// tblname := "my_table" -// data := "my_data" -// quoted := pq.QuoteIdentifier(tblname) -// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. 
If the input string contains a zero @@ -41,5 +41,5 @@ func QuoteIdentifier(name string) string { if end > -1 { name = name[:end] } - return `"` + strings.ReplaceAll(name, `"`, `""`) + `"` + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` } diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index 73d8f14e60f88..a4937f4c8588e 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -14,11 +14,10 @@ import ( "sync" "time" - "github.com/hashicorp/go-kms-wrapping/entropy/v2" - jsonpatch "github.com/evanphx/json-patch/v5" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -92,9 +91,6 @@ type Backend struct { // BackendType is the logical.BackendType for the backend implementation BackendType logical.BackendType - // RunningVersion is the optional version that will be self-reported - RunningVersion string - logger log.Logger system logical.SystemView once sync.Once @@ -432,13 +428,6 @@ func (b *Backend) Type() logical.BackendType { return b.BackendType } -// Version returns the plugin version information -func (b *Backend) PluginVersion() logical.PluginVersion { - return logical.PluginVersion{ - Version: b.RunningVersion, - } -} - // Route looks up the path that would be used for a given path string. func (b *Backend) Route(path string) *Path { result, _ := b.route(path) @@ -539,16 +528,9 @@ func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error // names in the OAS document. requestResponsePrefix := req.GetString("requestResponsePrefix") - // Generic mount paths will primarily be used for code generation purposes. - // This will result in dynamic mount paths being placed instead of - // hardcoded default paths. For example /auth/approle/login would be replaced - // with /auth/{mountPath}/login. 
This will be replaced for all secrets - // engines and auth methods that are enabled. - genericMountPaths, _ := req.Get("genericMountPaths").(bool) - // Build OpenAPI response for the entire backend doc := NewOASDocument() - if err := documentPaths(b, requestResponsePrefix, genericMountPaths, doc); err != nil { + if err := documentPaths(b, requestResponsePrefix, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) } diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index 4659f7ae2f1a9..2c3377f502d59 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -213,9 +213,9 @@ var ( ) // documentPaths parses all paths in a framework.Backend into OpenAPI paths. -func documentPaths(backend *Backend, requestResponsePrefix string, genericMountPaths bool, doc *OASDocument) error { +func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocument) error { for _, p := range backend.Paths { - if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, genericMountPaths, backend.BackendType, doc); err != nil { + if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, backend.BackendType, doc); err != nil { return err } } @@ -224,7 +224,7 @@ func documentPaths(backend *Backend, requestResponsePrefix string, genericMountP } // documentPath parses a framework.Path into one or more OpenAPI paths. -func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, genericMountPaths bool, backendType logical.BackendType, doc *OASDocument) error { +func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, backendType logical.BackendType, doc *OASDocument) error { var sudoPaths []string var unauthPaths []string @@ -263,21 +263,6 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Body fields will be added to individual operations. 
pathFields, bodyFields := splitFields(p.Fields, path) - if genericMountPaths && requestResponsePrefix != "system" && requestResponsePrefix != "identity" { - // Add mount path as a parameter - p := OASParameter{ - Name: "mountPath", - Description: "Path that the backend was mounted at", - In: "path", - Schema: &OASSchema{ - Type: "string", - }, - Required: true, - } - - pi.Parameters = append(pi.Parameters, p) - } - for name, field := range pathFields { location := "path" required := true @@ -553,7 +538,7 @@ func expandPattern(pattern string) []string { if start != -1 && end != -1 && end > start { regexToRemove = base[start+1 : end] } - pattern = strings.ReplaceAll(pattern, regexToRemove, "") + pattern = strings.Replace(pattern, regexToRemove, "", -1) // Simplify named fields that have limited options, e.g. (?Pa|b|c) -> (.+) pattern = altFieldsGroupRe.ReplaceAllStringFunc(pattern, func(s string) string { @@ -734,8 +719,7 @@ func cleanResponse(resp *logical.Response) *cleanedResponse { // /sys/tools/random/{urlbytes} -> postSysToolsRandomUrlbytes // // In the unlikely case of a duplicate ids, a numeric suffix is added: -// -// postSysToolsRandomUrlbytes_2 +// postSysToolsRandomUrlbytes_2 // // An optional user-provided suffix ("context") may also be appended. func (d *OASDocument) CreateOperationIDs(context string) { @@ -768,7 +752,7 @@ func (d *OASDocument) CreateOperationIDs(context string) { // Space-split on non-words, title case everything, recombine opID := nonWordRe.ReplaceAllString(strings.ToLower(path), " ") opID = strings.Title(opID) - opID = method + strings.ReplaceAll(opID, " ", "") + opID = method + strings.Replace(opID, " ", "", -1) // deduplicate operationIds. This is a safeguard, since generated IDs should // already be unique given our current path naming conventions. 
diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index 8d3ecfea018b4..592406d9fb722 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -271,7 +271,7 @@ func TestOpenAPI_SpecialPaths(t *testing.T) { Root: test.rootPaths, Unauthenticated: test.unauthPaths, } - err := documentPath(&path, sp, "kv", false, logical.TypeLogical, doc) + err := documentPath(&path, sp, "kv", logical.TypeLogical, doc) if err != nil { t.Fatal(err) } @@ -519,11 +519,11 @@ func TestOpenAPI_OperationID(t *testing.T) { for _, context := range []string{"", "bar"} { doc := NewOASDocument() - err := documentPath(path1, nil, "kv", false, logical.TypeLogical, doc) + err := documentPath(path1, nil, "kv", logical.TypeLogical, doc) if err != nil { t.Fatal(err) } - err = documentPath(path2, nil, "kv", false, logical.TypeLogical, doc) + err = documentPath(path2, nil, "kv", logical.TypeLogical, doc) if err != nil { t.Fatal(err) } @@ -583,7 +583,7 @@ func TestOpenAPI_CustomDecoder(t *testing.T) { } docOrig := NewOASDocument() - err := documentPath(p, nil, "kv", false, logical.TypeLogical, docOrig) + err := documentPath(p, nil, "kv", logical.TypeLogical, docOrig) if err != nil { t.Fatal(err) } @@ -646,7 +646,7 @@ func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) t.Helper() doc := NewOASDocument() - if err := documentPath(path, sp, "kv", false, logical.TypeLogical, doc); err != nil { + if err := documentPath(path, sp, "kv", logical.TypeLogical, doc); err != nil { t.Fatal(err) } doc.CreateOperationIDs("") diff --git a/sdk/framework/path.go b/sdk/framework/path.go index 8a8b1c75879d4..fe29a400890a9 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -317,7 +317,7 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { // Build OpenAPI response for this path doc := NewOASDocument() - if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, false, b.BackendType, doc); err != nil { + if err := 
documentPath(p, b.SpecialPaths(), requestResponsePrefix, b.BackendType, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) } diff --git a/sdk/framework/secret.go b/sdk/framework/secret.go index 0c8f0dfcccdf6..d338e06f0379c 100644 --- a/sdk/framework/secret.go +++ b/sdk/framework/secret.go @@ -42,8 +42,7 @@ func (s *Secret) Renewable() bool { } func (s *Secret) Response( - data, internal map[string]interface{}, -) *logical.Response { + data, internal map[string]interface{}) *logical.Response { internalData := make(map[string]interface{}) for k, v := range internal { internalData[k] = v diff --git a/sdk/go.mod b/sdk/go.mod index 9e4e10fdc1c91..6945d15d77b26 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/vault/sdk -go 1.19 +go 1.17 require ( github.com/armon/go-metrics v0.3.9 @@ -14,7 +14,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-immutable-radix v1.3.1 - github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 + github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-plugin v1.4.3 github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 diff --git a/sdk/go.sum b/sdk/go.sum index 9930ad8884e0f..7fd2f4ad80299 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -95,8 +95,8 @@ github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= 
+github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index 348c85f9dd49f..457de9b022cc0 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -49,26 +49,6 @@ var expectedNISTPCurveHashBits = map[int]int{ 521: 512, } -// Mapping of constant names<->constant values for SignatureAlgorithm -var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{ - "sha256withrsa": x509.SHA256WithRSA, - "sha384withrsa": x509.SHA384WithRSA, - "sha512withrsa": x509.SHA512WithRSA, - "ecdsawithsha256": x509.ECDSAWithSHA256, - "ecdsawithsha384": x509.ECDSAWithSHA384, - "ecdsawithsha512": x509.ECDSAWithSHA512, - "sha256withrsapss": x509.SHA256WithRSAPSS, - "sha384withrsapss": x509.SHA384WithRSAPSS, - "sha512withrsapss": x509.SHA512WithRSAPSS, - "pureed25519": x509.PureEd25519, - "ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix. -} - -// OID for RFC 5280 Delta CRL Indicator CRL extension. -// -// > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } -var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) - // GetHexFormatted returns the byte buffer formatted in hex with // the specified separator between bytes. 
func GetHexFormatted(buf []byte, sep string) string { @@ -107,16 +87,6 @@ func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { return getSubjectKeyID(privateKey.Public()) } -// Returns the explicit SKID when used for cross-signing, else computes a new -// SKID from the key itself. -func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) { - if len(data.Params.SKID) > 0 { - return data.Params.SKID, nil - } - - return getSubjectKeyID(data.CSR.PublicKey) -} - func getSubjectKeyID(pub interface{}) ([]byte, error) { var publicKeyBytes []byte switch pub := pub.(type) { @@ -181,21 +151,18 @@ func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { } func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) { - var firstError error - if signer, firstError = x509.ParseECPrivateKey(privateKeyBytes); firstError == nil { + if signer, err = x509.ParseECPrivateKey(privateKeyBytes); err == nil { format = ECBlock return } - var secondError error - if signer, secondError = x509.ParsePKCS1PrivateKey(privateKeyBytes); secondError == nil { + if signer, err = x509.ParsePKCS1PrivateKey(privateKeyBytes); err == nil { format = PKCS1Block return } - var thirdError error var rawKey interface{} - if rawKey, thirdError = x509.ParsePKCS8PrivateKey(privateKeyBytes); thirdError == nil { + if rawKey, err = x509.ParsePKCS8PrivateKey(privateKeyBytes); err == nil { switch rawSigner := rawKey.(type) { case *rsa.PrivateKey: signer = rawSigner @@ -211,7 +178,7 @@ func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType return } - return nil, UnknownBlock, fmt.Errorf("got errors attempting to parse DER private key:\n1. %v\n2. %v\n3. 
%v", firstError, secondError, thirdError) + return nil, UnknownBlock, err } func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) { @@ -789,29 +756,6 @@ func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reade return createCertificate(data, randReader, keyGenerator) } -// Set correct correct RSA sig algo -func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) { - if data.Params.UsePSS { - switch data.Params.SignatureBits { - case 256: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSAPSS - case 384: - certTemplate.SignatureAlgorithm = x509.SHA384WithRSAPSS - case 512: - certTemplate.SignatureAlgorithm = x509.SHA512WithRSAPSS - } - } else { - switch data.Params.SignatureBits { - case 256: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case 384: - certTemplate.SignatureAlgorithm = x509.SHA384WithRSA - case 512: - certTemplate.SignatureAlgorithm = x509.SHA512WithRSA - } - } -} - func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) { var err error result := &ParsedCertBundle{} @@ -880,7 +824,14 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen if data.SigningBundle != nil { switch data.SigningBundle.PrivateKeyType { case RSAPrivateKey: - certTemplateSetSigAlgo(certTemplate, data) + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } case Ed25519PrivateKey: certTemplate.SignatureAlgorithm = x509.PureEd25519 case ECPrivateKey: @@ -902,7 +853,14 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen switch data.Params.KeyType { case "rsa": - certTemplateSetSigAlgo(certTemplate, data) + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = 
x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } case "ed25519": certTemplate.SignatureAlgorithm = x509.PureEd25519 case "ec": @@ -1108,7 +1066,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun return nil, err } - subjKeyID, err := getSubjectKeyIDFromBundle(data) + subjKeyID, err := getSubjectKeyID(data.CSR.PublicKey) if err != nil { return nil, err } @@ -1129,7 +1087,14 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun switch data.SigningBundle.PrivateKeyType { case RSAPrivateKey: - certTemplateSetSigAlgo(certTemplate, data) + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } case ECPrivateKey: switch data.Params.SignatureBits { case 256: @@ -1301,26 +1266,3 @@ func CreateKeyBundleWithKeyGenerator(keyType string, keyBits int, randReader io. } return result, nil } - -// CreateDeltaCRLIndicatorExt allows creating correctly formed delta CRLs -// that point back to the last complete CRL that they're based on. -func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) { - bigNum := big.NewInt(completeCRLNumber) - bigNumValue, err := asn1.Marshal(bigNum) - if err != nil { - return pkix.Extension{}, fmt.Errorf("unable to marshal complete CRL number (%v): %v", completeCRLNumber, err) - } - return pkix.Extension{ - Id: DeltaCRLIndicatorOID, - // > When a conforming CRL issuer generates a delta CRL, the delta - // > CRL MUST include a critical delta CRL indicator extension. - Critical: true, - // This extension only includes the complete CRL number: - // - // > BaseCRLNumber ::= CRLNumber - // - // But, this needs to be encoded as a big number for encoding/asn1 - // to work properly. 
- Value: bigNumValue, - }, nil -} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 03aba84996b17..a5caa2e4409fd 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -710,7 +710,6 @@ type CAInfoBundle struct { ParsedCertBundle URLs *URLEntries LeafNotAfterBehavior NotAfterBehavior - RevocationSigAlg x509.SignatureAlgorithm } func (b *CAInfoBundle) GetCAChain() []*CertBlock { @@ -783,7 +782,6 @@ type CreationParameters struct { PolicyIdentifiers []string BasicConstraintsValidForNonCA bool SignatureBits int - UsePSS bool ForceAppendCaChain bool // Only used when signing a CA cert @@ -798,9 +796,6 @@ type CreationParameters struct { // The duration the certificate will use NotBefore NotBeforeDuration time.Duration - - // The explicit SKID to use; especially useful for cross-signing. - SKID []byte } type CreationBundle struct { diff --git a/sdk/helper/consts/deprecation_status.go b/sdk/helper/consts/deprecation_status.go deleted file mode 100644 index 5591924a77080..0000000000000 --- a/sdk/helper/consts/deprecation_status.go +++ /dev/null @@ -1,31 +0,0 @@ -package consts - -const VaultAllowPendingRemovalMountsEnv = "VAULT_ALLOW_PENDING_REMOVAL_MOUNTS" - -// DeprecationStatus represents the current deprecation state for builtins -type DeprecationStatus uint32 - -// These are the states of deprecation for builtin plugins -const ( - Supported = iota - Deprecated - PendingRemoval - Removed - Unknown -) - -// String returns the string representation of a builtin deprecation status -func (s DeprecationStatus) String() string { - switch s { - case Supported: - return "supported" - case Deprecated: - return "deprecated" - case PendingRemoval: - return "pending removal" - case Removed: - return "removed" - default: - return "" - } -} diff --git a/sdk/helper/custommetadata/custom_metadata.go b/sdk/helper/custommetadata/custom_metadata.go deleted file mode 100644 index 7d4ff8763d116..0000000000000 --- 
a/sdk/helper/custommetadata/custom_metadata.go +++ /dev/null @@ -1,103 +0,0 @@ -package custommetadata - -import ( - "fmt" - - "github.com/mitchellh/mapstructure" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/strutil" -) - -// The following constants are used by Validate and are meant to be imposed -// broadly for consistency. -const ( - maxKeys = 64 - maxKeyLength = 128 - maxValueLength = 512 - validationErrorPrefix = "custom_metadata validation failed" -) - -// Parse is used to effectively convert the TypeMap -// (map[string]interface{}) into a TypeKVPairs (map[string]string) -// which is how custom_metadata is stored. Defining custom_metadata -// as a TypeKVPairs will convert nulls into empty strings. A null, -// however, is essential for a PATCH operation in that it signals -// the handler to remove the field. The filterNils flag should -// only be used during a patch operation. -func Parse(raw map[string]interface{}, filterNils bool) (map[string]string, error) { - customMetadata := map[string]string{} - for k, v := range raw { - if filterNils && v == nil { - continue - } - - var s string - if err := mapstructure.WeakDecode(v, &s); err != nil { - return nil, err - } - - customMetadata[k] = s - } - - return customMetadata, nil -} - -// Validate will perform input validation for custom metadata. -// CustomMetadata should be arbitrary user-provided key-value pairs meant to -// provide supplemental information about a resource. If the key count -// exceeds maxKeys, the validation will be short-circuited to prevent -// unnecessary (and potentially costly) validation to be run. If the key count -// falls at or below maxKeys, multiple checks will be made per key and value. 
-// These checks include: -// - 0 < length of key <= maxKeyLength -// - 0 < length of value <= maxValueLength -// - keys and values cannot include unprintable characters -func Validate(cm map[string]string) error { - var errs *multierror.Error - - if keyCount := len(cm); keyCount > maxKeys { - errs = multierror.Append(errs, fmt.Errorf("%s: payload must contain at most %d keys, provided %d", - validationErrorPrefix, - maxKeys, - keyCount)) - - return errs.ErrorOrNil() - } - - // Perform validation on each key and value and return ALL errors - for key, value := range cm { - if keyLen := len(key); 0 == keyLen || keyLen > maxKeyLength { - errs = multierror.Append(errs, fmt.Errorf("%s: length of key %q is %d but must be 0 < len(key) <= %d", - validationErrorPrefix, - key, - keyLen, - maxKeyLength)) - } - - if valueLen := len(value); 0 == valueLen || valueLen > maxValueLength { - errs = multierror.Append(errs, fmt.Errorf("%s: length of value for key %q is %d but must be 0 < len(value) <= %d", - validationErrorPrefix, - key, - valueLen, - maxValueLength)) - } - - if !strutil.Printable(key) { - // Include unquoted format (%s) to also include the string without the unprintable - // characters visible to allow for easier debug and key identification - errs = multierror.Append(errs, fmt.Errorf("%s: key %q (%s) contains unprintable characters", - validationErrorPrefix, - key, - key)) - } - - if !strutil.Printable(value) { - errs = multierror.Append(errs, fmt.Errorf("%s: value for key %q contains unprintable characters", - validationErrorPrefix, - key)) - } - } - - return errs.ErrorOrNil() -} diff --git a/sdk/helper/custommetadata/custom_metadata_test.go b/sdk/helper/custommetadata/custom_metadata_test.go deleted file mode 100644 index e71bd59462fed..0000000000000 --- a/sdk/helper/custommetadata/custom_metadata_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package custommetadata - -import ( - "strconv" - "strings" - "testing" -) - -func TestValidate(t *testing.T) { - cases := 
[]struct { - name string - input map[string]string - shouldPass bool - }{ - { - "valid", - map[string]string{ - "foo": "abc", - "bar": "def", - "baz": "ghi", - }, - true, - }, - { - "too_many_keys", - func() map[string]string { - cm := make(map[string]string) - - for i := 0; i < maxKeyLength+1; i++ { - s := strconv.Itoa(i) - cm[s] = s - } - - return cm - }(), - false, - }, - { - "key_too_long", - map[string]string{ - strings.Repeat("a", maxKeyLength+1): "abc", - }, - false, - }, - { - "value_too_long", - map[string]string{ - "foo": strings.Repeat("a", maxValueLength+1), - }, - false, - }, - { - "unprintable_key", - map[string]string{ - "unprint\u200bable": "abc", - }, - false, - }, - { - "unprintable_value", - map[string]string{ - "foo": "unprint\u200bable", - }, - false, - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - err := Validate(tc.input) - - if tc.shouldPass && err != nil { - t.Fatalf("expected validation to pass, input: %#v, err: %v", tc.input, err) - } - - if !tc.shouldPass && err == nil { - t.Fatalf("expected validation to fail, input: %#v, err: %v", tc.input, err) - } - }) - } -} diff --git a/sdk/helper/dbtxn/dbtxn.go b/sdk/helper/dbtxn/dbtxn.go index 133b360e73e82..a5c8bf961e37a 100644 --- a/sdk/helper/dbtxn/dbtxn.go +++ b/sdk/helper/dbtxn/dbtxn.go @@ -77,7 +77,7 @@ func parseQuery(m map[string]string, tpl string) string { } for k, v := range m { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) } return tpl } diff --git a/sdk/helper/keysutil/consts.go b/sdk/helper/keysutil/consts.go index cbb8123517f13..2a83ab8496617 100644 --- a/sdk/helper/keysutil/consts.go +++ b/sdk/helper/keysutil/consts.go @@ -1,7 +1,6 @@ package keysutil import ( - "crypto" "crypto/sha1" "crypto/sha256" "crypto/sha512" @@ -58,18 +57,6 @@ var ( HashTypeSHA3512: sha3.New512, } - CryptoHashMap = map[HashType]crypto.Hash{ - HashTypeSHA1: crypto.SHA1, 
- HashTypeSHA2224: crypto.SHA224, - HashTypeSHA2256: crypto.SHA256, - HashTypeSHA2384: crypto.SHA384, - HashTypeSHA2512: crypto.SHA512, - HashTypeSHA3224: crypto.SHA3_224, - HashTypeSHA3256: crypto.SHA3_256, - HashTypeSHA3384: crypto.SHA3_384, - HashTypeSHA3512: crypto.SHA3_512, - } - MarshalingTypeMap = map[string]MarshalingType{ "asn1": MarshalingTypeASN1, "jws": MarshalingTypeJWS, diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index a60cf69d53f2c..df855c93fa11f 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -36,9 +36,6 @@ type PolicyRequest struct { // The key type KeyType KeyType - // The key size for variable key size algorithms - KeySize int - // Whether it should be derived Derived bool @@ -376,11 +373,6 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io cleanup() return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) } - case KeyType_HMAC: - if req.Derived || req.Convergent { - cleanup() - return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) - } default: cleanup() @@ -395,7 +387,6 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io Exportable: req.Exportable, AllowPlaintextBackup: req.AllowPlaintextBackup, AutoRotatePeriod: req.AutoRotatePeriod, - KeySize: req.KeySize, } if req.Derived { diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index 73afb9ecd7384..59029756d2e17 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -45,9 +45,6 @@ import ( const ( Kdf_hmac_sha256_counter = iota // built-in helper Kdf_hkdf_sha256 // golang.org/x/crypto/hkdf - - HmacMinKeySize = 256 / 8 - HmacMaxKeySize = 4096 / 8 ) // Or this one...we need the default of zero to be the original AES256-GCM96 @@ -62,8 +59,6 @@ const ( KeyType_ECDSA_P521 
KeyType_AES128_GCM96 KeyType_RSA3072 - KeyType_MANAGED_KEY - KeyType_HMAC ) const ( @@ -75,10 +70,6 @@ const ( DefaultVersionTemplate = "vault:v{{version}}:" ) -type AEADFactory interface { - GetAEAD(iv []byte) (cipher.AEAD, error) -} - type RestoreInfo struct { Time time.Time `json:"time"` Version int `json:"version"` @@ -89,13 +80,6 @@ type BackupInfo struct { Version int `json:"version"` } -type SigningOptions struct { - HashAlgorithm HashType - Marshaling MarshalingType - SaltLength int - SigAlgorithm string -} - type SigningResult struct { Signature string PublicKey []byte @@ -169,8 +153,6 @@ func (kt KeyType) String() string { return "rsa-3072" case KeyType_RSA4096: return "rsa-4096" - case KeyType_HMAC: - return "hmac" } return "[unknown]" @@ -329,10 +311,9 @@ type Policy struct { // served after a delete. deleted uint32 - Name string `json:"name"` - Key []byte `json:"key,omitempty"` // DEPRECATED - KeySize int `json:"key_size,omitempty"` // For algorithms with variable key sizes - Keys keyEntryMap `json:"keys"` + Name string `json:"name"` + Key []byte `json:"key,omitempty"` // DEPRECATED + Keys keyEntryMap `json:"keys"` // Derived keys MUST provide a context and the master underlying key is // never used. 
If convergent encryption is true, the context will be used @@ -407,8 +388,6 @@ type Policy struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool - - ManagedKeyName string `json:"managed_key_name,omitempty"` } func (p *Policy) Lock(exclusive bool) { @@ -840,7 +819,98 @@ func (p *Policy) convergentVersion(ver int) int { } func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) { - return p.EncryptWithFactory(ver, context, nonce, value, nil) + if !p.Type.EncryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} + } + + // Decode the plaintext value + plaintext, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return "", errutil.UserError{Err: err.Error()} + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for encryption is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"} + case ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"} + } + + var ciphertext []byte + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + hmacKey := context + + var encKey []byte + var deriveHMAC bool + + encBytes := 32 + hmacBytes := 0 + if p.convergentVersion(ver) > 2 { + deriveHMAC = true + hmacBytes = 32 + } + if p.Type == KeyType_AES128_GCM96 { + encBytes = 16 + } + + key, err := p.GetKey(context, ver, encBytes+hmacBytes) + if err != nil { + return "", err + } + + if len(key) < encBytes+hmacBytes { + return "", errutil.InternalError{Err: "could not derive key, length too small"} + } + + encKey = key[:encBytes] + if len(encKey) != encBytes { + return "", errutil.InternalError{Err: 
"could not derive enc key, length not correct"} + } + if deriveHMAC { + hmacKey = key[encBytes:] + if len(hmacKey) != hmacBytes { + return "", errutil.InternalError{Err: "could not derive hmac key, length not correct"} + } + } + + ciphertext, err = p.SymmetricEncryptRaw(ver, encKey, plaintext, + SymmetricOpts{ + Convergent: p.ConvergentEncryption, + HMACKey: hmacKey, + Nonce: nonce, + }) + + if err != nil { + return "", err + } + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + key := keyEntry.RSAKey + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} + } + + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + + // Convert to base64 + encoded := base64.StdEncoding.EncodeToString(ciphertext) + + // Prepend some information + encoded = p.getVersionPrefix(ver) + encoded + + return encoded, nil } func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { @@ -948,40 +1018,14 @@ func (p *Policy) HMACKey(version int) ([]byte, error) { if err != nil { return nil, err } - - if p.Type == KeyType_HMAC { - return keyEntry.Key, nil - } if keyEntry.HMACKey == nil { return nil, fmt.Errorf("no HMAC key exists for that key version") } + return keyEntry.HMACKey, nil } func (p *Policy) Sign(ver int, context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType) (*SigningResult, error) { - return p.SignWithOptions(ver, context, input, &SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: rsa.PSSSaltLengthAuto, - SigAlgorithm: sigAlgorithm, - }) -} - -func (p *Policy) minRSAPSSSaltLength() int { - // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=247 - return 
rsa.PSSSaltLengthEqualsHash -} - -func (p *Policy) maxRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash) int { - // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=288 - return (priv.N.BitLen()-1+7)/8 - 2 - hash.Size() -} - -func (p *Policy) validRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash, saltLength int) bool { - return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(priv, hash) -} - -func (p *Policy) SignWithOptions(ver int, context, input []byte, options *SigningOptions) (*SigningResult, error) { if !p.Type.SigningSupported() { return nil, fmt.Errorf("message signing not supported for key type %v", p.Type) } @@ -1005,11 +1049,6 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, err } - hashAlgorithm := options.HashAlgorithm - marshaling := options.Marshaling - saltLength := options.SaltLength - sigAlgorithm := options.SigAlgorithm - switch p.Type { case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: var curveBits int @@ -1100,8 +1139,27 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: key := keyParams.RSAKey - algo, ok := CryptoHashMap[hashAlgorithm] - if !ok { + var algo crypto.Hash + switch hashAlgorithm { + case HashTypeSHA1: + algo = crypto.SHA1 + case HashTypeSHA2224: + algo = crypto.SHA224 + case HashTypeSHA2256: + algo = crypto.SHA256 + case HashTypeSHA2384: + algo = crypto.SHA384 + case HashTypeSHA2512: + algo = crypto.SHA512 + case HashTypeSHA3224: + algo = crypto.SHA3_224 + case HashTypeSHA3256: + algo = crypto.SHA3_256 + case HashTypeSHA3384: + algo = crypto.SHA3_384 + case HashTypeSHA3512: + algo = crypto.SHA3_512 + default: return nil, errutil.InternalError{Err: "unsupported hash algorithm"} } @@ -1111,10 +1169,7 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin switch sigAlgorithm { case 
"pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { - return nil, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} - } - sig, err = rsa.SignPSS(rand.Reader, key, algo, input, &rsa.PSSOptions{SaltLength: saltLength}) + sig, err = rsa.SignPSS(rand.Reader, key, algo, input, nil) if err != nil { return nil, err } @@ -1148,15 +1203,6 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin } func (p *Policy) VerifySignature(context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType, sig string) (bool, error) { - return p.VerifySignatureWithOptions(context, input, sig, &SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: rsa.PSSSaltLengthAuto, - SigAlgorithm: sigAlgorithm, - }) -} - -func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, options *SigningOptions) (bool, error) { if !p.Type.SigningSupported() { return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)} } @@ -1189,11 +1235,6 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return false, errutil.UserError{Err: ErrTooOld} } - hashAlgorithm := options.HashAlgorithm - marshaling := options.Marshaling - saltLength := options.SaltLength - sigAlgorithm := options.SigAlgorithm - var sigBytes []byte switch marshaling { case MarshalingTypeASN1: @@ -1277,8 +1318,27 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o key := keyEntry.RSAKey - algo, ok := CryptoHashMap[hashAlgorithm] - if !ok { + var algo crypto.Hash + switch hashAlgorithm { + case HashTypeSHA1: + algo = crypto.SHA1 + case HashTypeSHA2224: + algo = crypto.SHA224 + case HashTypeSHA2256: + algo = crypto.SHA256 + case HashTypeSHA2384: + algo = crypto.SHA384 + case HashTypeSHA2512: + algo = crypto.SHA512 + case HashTypeSHA3224: + algo = crypto.SHA3_224 + case 
HashTypeSHA3256: + algo = crypto.SHA3_256 + case HashTypeSHA3384: + algo = crypto.SHA3_384 + case HashTypeSHA3512: + algo = crypto.SHA3_512 + default: return false, errutil.InternalError{Err: "unsupported hash algorithm"} } @@ -1288,10 +1348,7 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { - return false, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} - } - err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) + err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, nil) case "pkcs1v15": err = rsa.VerifyPKCS1v15(&key.PublicKey, algo, input, sigBytes) default: @@ -1312,39 +1369,23 @@ func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte DeprecatedCreationTime: now.Unix(), } - if p.Type != KeyType_HMAC { - hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) - if err != nil { - return err - } - entry.HMACKey = hmacKey + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err } + entry.HMACKey = hmacKey if (p.Type == KeyType_AES128_GCM96 && len(key) != 16) || - ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) || - (p.Type == KeyType_HMAC && (len(key) < HmacMinKeySize || len(key) > HmacMaxKeySize)) { + ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) { return fmt.Errorf("invalid key size %d bytes for key type %s", len(key), p.Type) } - if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 || p.Type == KeyType_HMAC { + if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 { entry.Key = key - if p.Type == KeyType_HMAC { - p.KeySize = len(key) - } } else { parsedPrivateKey, err := 
x509.ParsePKCS8PrivateKey(key) if err != nil { - if strings.Contains(err.Error(), "unknown elliptic curve") { - var edErr error - parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) - if edErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) - } - - // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! - } else { - return fmt.Errorf("error parsing asymmetric key: %s", err) - } + return fmt.Errorf("error parsing asymmetric key: %s", err) } switch parsedPrivateKey.(type) { @@ -1481,16 +1522,11 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { entry.HMACKey = hmacKey switch p.Type { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: // Default to 256 bit key numBytes := 32 if p.Type == KeyType_AES128_GCM96 { numBytes = 16 - } else if p.Type == KeyType_HMAC { - numBytes := p.KeySize - if numBytes < HmacMinKeySize || numBytes > HmacMaxKeySize { - return fmt.Errorf("invalid key size for HMAC key, must be between %d and %d bytes", HmacMinKeySize, HmacMaxKeySize) - } } newKey, err := uuid.GenerateRandomBytesWithReader(numBytes, randReader) if err != nil { @@ -1670,7 +1706,7 @@ func (p *Policy) getVersionPrefix(ver int) string { template = DefaultVersionTemplate } - prefix := strings.ReplaceAll(template, "{{version}}", strconv.Itoa(ver)) + prefix := strings.Replace(template, "{{version}}", strconv.Itoa(ver), -1) p.versionPrefixCache.Store(ver, prefix) return prefix @@ -1689,8 +1725,6 @@ type SymmetricOpts struct { AdditionalData []byte // The HMAC key, for generating IVs in convergent encryption HMACKey []byte - // Allows an external provider of the AEAD, for e.g. 
managed keys - AEADFactory AEADFactory } // Symmetrically encrypt a plaintext given the convergence configuration and appropriate keys @@ -1722,17 +1756,6 @@ func (p *Policy) SymmetricEncryptRaw(ver int, encKey, plaintext []byte, opts Sym } aead = cha - case KeyType_MANAGED_KEY: - if opts.Convergent || len(opts.Nonce) != 0 { - return nil, errutil.UserError{Err: "cannot use convergent encryption or provide a nonce to managed-key backed encryption"} - } - if opts.AEADFactory == nil { - return nil, errors.New("expected AEAD factory from managed key, none provided") - } - aead, err = opts.AEADFactory.GetAEAD(nonce) - if err != nil { - return nil, err - } } if opts.Convergent { @@ -1776,7 +1799,6 @@ func (p *Policy) SymmetricEncryptRaw(ver int, encKey, plaintext []byte, opts Sym // Symmetrically decrypt a ciphertext given the convergence configuration and appropriate keys func (p *Policy) SymmetricDecryptRaw(encKey, ciphertext []byte, opts SymmetricOpts) ([]byte, error) { var aead cipher.AEAD - var err error var nonce []byte switch p.Type { @@ -1802,11 +1824,6 @@ func (p *Policy) SymmetricDecryptRaw(encKey, ciphertext []byte, opts SymmetricOp } aead = cha - case KeyType_MANAGED_KEY: - aead, err = opts.AEADFactory.GetAEAD(nonce) - if err != nil { - return nil, err - } } if len(ciphertext) < aead.NonceSize() { @@ -1829,99 +1846,3 @@ func (p *Policy) SymmetricDecryptRaw(encKey, ciphertext []byte, opts SymmetricOp } return plain, nil } - -func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value string, factory AEADFactory) (string, error) { - if !p.Type.EncryptionSupported() { - return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} - } - - // Decode the plaintext value - plaintext, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return "", errutil.UserError{Err: err.Error()} - } - - switch { - case ver == 0: - ver = p.LatestVersion - case ver < 0: - return "", 
errutil.UserError{Err: "requested version for encryption is negative"} - case ver > p.LatestVersion: - return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"} - case ver < p.MinEncryptionVersion: - return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"} - } - - var ciphertext []byte - - switch p.Type { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: - hmacKey := context - - var encKey []byte - var deriveHMAC bool - - encBytes := 32 - hmacBytes := 0 - if p.convergentVersion(ver) > 2 { - deriveHMAC = true - hmacBytes = 32 - } - if p.Type == KeyType_AES128_GCM96 { - encBytes = 16 - } - - key, err := p.GetKey(context, ver, encBytes+hmacBytes) - if err != nil { - return "", err - } - - if len(key) < encBytes+hmacBytes { - return "", errutil.InternalError{Err: "could not derive key, length too small"} - } - - encKey = key[:encBytes] - if len(encKey) != encBytes { - return "", errutil.InternalError{Err: "could not derive enc key, length not correct"} - } - if deriveHMAC { - hmacKey = key[encBytes:] - if len(hmacKey) != hmacBytes { - return "", errutil.InternalError{Err: "could not derive hmac key, length not correct"} - } - } - - ciphertext, err = p.SymmetricEncryptRaw(ver, encKey, plaintext, - SymmetricOpts{ - Convergent: p.ConvergentEncryption, - HMACKey: hmacKey, - Nonce: nonce, - AEADFactory: factory, - }) - - if err != nil { - return "", err - } - case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: - keyEntry, err := p.safeGetKeyEntry(ver) - if err != nil { - return "", err - } - key := keyEntry.RSAKey - ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) - if err != nil { - return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} - } - - default: - return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} - } - 
- // Convert to base64 - encoded := base64.StdEncoding.EncodeToString(ciphertext) - - // Prepend some information - encoded = p.getVersionPrefix(ver) + encoded - - return encoded, nil -} diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go index 91767cfd9d398..a2d9206a8acae 100644 --- a/sdk/helper/keysutil/policy_test.go +++ b/sdk/helper/keysutil/policy_test.go @@ -8,19 +8,14 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" - "errors" - "fmt" - mathrand "math/rand" "reflect" "strconv" - "strings" "sync" "testing" "time" "golang.org/x/crypto/ed25519" - "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/copystructure" @@ -703,26 +698,6 @@ func generateTestKeys() (map[KeyType][]byte, error) { } keyMap[KeyType_RSA2048] = rsaKeyBytes - rsaKey, err = rsa.GenerateKey(rand.Reader, 3072) - if err != nil { - return nil, err - } - rsaKeyBytes, err = x509.MarshalPKCS8PrivateKey(rsaKey) - if err != nil { - return nil, err - } - keyMap[KeyType_RSA3072] = rsaKeyBytes - - rsaKey, err = rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - return nil, err - } - rsaKeyBytes, err = x509.MarshalPKCS8PrivateKey(rsaKey) - if err != nil { - return nil, err - } - keyMap[KeyType_RSA4096] = rsaKeyBytes - ecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return nil, err @@ -779,199 +754,3 @@ func BenchmarkSymmetric(b *testing.B) { } } } - -func saltOptions(options SigningOptions, saltLength int) SigningOptions { - return SigningOptions{ - HashAlgorithm: options.HashAlgorithm, - Marshaling: options.Marshaling, - SaltLength: saltLength, - SigAlgorithm: options.SigAlgorithm, - } -} - -func manualVerify(depth int, t *testing.T, p *Policy, input []byte, sig *SigningResult, options SigningOptions) { - tabs := strings.Repeat("\t", depth) - t.Log(tabs, "Manually verifying signature with options:", options) - - tabs = 
strings.Repeat("\t", depth+1) - verified, err := p.VerifySignatureWithOptions(nil, input, sig.Signature, &options) - if err != nil { - t.Fatal(tabs, "❌ Failed to manually verify signature:", err) - } - if !verified { - t.Fatal(tabs, "❌ Failed to manually verify signature") - } -} - -func autoVerify(depth int, t *testing.T, p *Policy, input []byte, sig *SigningResult, options SigningOptions) { - tabs := strings.Repeat("\t", depth) - t.Log(tabs, "Automatically verifying signature with options:", options) - - tabs = strings.Repeat("\t", depth+1) - verified, err := p.VerifySignature(nil, input, options.HashAlgorithm, options.SigAlgorithm, options.Marshaling, sig.Signature) - if err != nil { - t.Fatal(tabs, "❌ Failed to automatically verify signature:", err) - } - if !verified { - t.Fatal(tabs, "❌ Failed to automatically verify signature") - } -} - -func Test_RSA_PSS(t *testing.T) { - t.Log("Testing RSA PSS") - mathrand.Seed(time.Now().UnixNano()) - - var userError errutil.UserError - ctx := context.Background() - storage := &logical.InmemStorage{} - // https://crypto.stackexchange.com/a/1222 - input := []byte("the ancients say the longer the salt, the more provable the security") - sigAlgorithm := "pss" - - tabs := make(map[int]string) - for i := 1; i <= 6; i++ { - tabs[i] = strings.Repeat("\t", i) - } - - test_RSA_PSS := func(t *testing.T, p *Policy, rsaKey *rsa.PrivateKey, hashType HashType, - marshalingType MarshalingType, - ) { - unsaltedOptions := SigningOptions{ - HashAlgorithm: hashType, - Marshaling: marshalingType, - SigAlgorithm: sigAlgorithm, - } - cryptoHash := CryptoHashMap[hashType] - minSaltLength := p.minRSAPSSSaltLength() - maxSaltLength := p.maxRSAPSSSaltLength(rsaKey, cryptoHash) - hash := cryptoHash.New() - hash.Write(input) - input = hash.Sum(nil) - - // 1. Make an "automatic" signature with the given key size and hash algorithm, - // but an automatically chosen salt length. 
- t.Log(tabs[3], "Make an automatic signature") - sig, err := p.Sign(0, nil, input, hashType, sigAlgorithm, marshalingType) - if err != nil { - // A bit of a hack but FIPS go does not support some hash types - if isUnsupportedGoHashType(hashType, err) { - t.Skip(tabs[4], "skipping test as FIPS Go does not support hash type") - return - } - t.Fatal(tabs[4], "❌ Failed to automatically sign:", err) - } - - // 1.1 Verify this automatic signature using the *inferred* salt length. - autoVerify(4, t, p, input, sig, unsaltedOptions) - - // 1.2. Verify this automatic signature using the *correct, given* salt length. - manualVerify(4, t, p, input, sig, saltOptions(unsaltedOptions, maxSaltLength)) - - // 1.3. Try to verify this automatic signature using *incorrect, given* salt lengths. - t.Log(tabs[4], "Test incorrect salt lengths") - incorrectSaltLengths := []int{minSaltLength, maxSaltLength - 1} - for _, saltLength := range incorrectSaltLengths { - t.Log(tabs[5], "Salt length:", saltLength) - saltedOptions := saltOptions(unsaltedOptions, saltLength) - - verified, _ := p.VerifySignatureWithOptions(nil, input, sig.Signature, &saltedOptions) - if verified { - t.Fatal(tabs[6], "❌ Failed to invalidate", verified, "signature using incorrect salt length:", err) - } - } - - // 2. Rule out boundary, invalid salt lengths. - t.Log(tabs[3], "Test invalid salt lengths") - invalidSaltLengths := []int{minSaltLength - 1, maxSaltLength + 1} - for _, saltLength := range invalidSaltLengths { - t.Log(tabs[4], "Salt length:", saltLength) - saltedOptions := saltOptions(unsaltedOptions, saltLength) - - // 2.1. Fail to sign. - t.Log(tabs[5], "Try to make a manual signature") - _, err := p.SignWithOptions(0, nil, input, &saltedOptions) - if !errors.As(err, &userError) { - t.Fatal(tabs[6], "❌ Failed to reject invalid salt length:", err) - } - - // 2.2. Fail to verify. 
- t.Log(tabs[5], "Try to verify an automatic signature using an invalid salt length") - _, err = p.VerifySignatureWithOptions(nil, input, sig.Signature, &saltedOptions) - if !errors.As(err, &userError) { - t.Fatal(tabs[6], "❌ Failed to reject invalid salt length:", err) - } - } - - // 3. For three possible valid salt lengths... - t.Log(tabs[3], "Test three possible valid salt lengths") - midSaltLength := mathrand.Intn(maxSaltLength-1) + 1 // [1, maxSaltLength) - validSaltLengths := []int{minSaltLength, midSaltLength, maxSaltLength} - for _, saltLength := range validSaltLengths { - t.Log(tabs[4], "Salt length:", saltLength) - saltedOptions := saltOptions(unsaltedOptions, saltLength) - - // 3.1. Make a "manual" signature with the given key size, hash algorithm, and salt length. - t.Log(tabs[5], "Make a manual signature") - sig, err := p.SignWithOptions(0, nil, input, &saltedOptions) - if err != nil { - t.Fatal(tabs[6], "❌ Failed to manually sign:", err) - } - - // 3.2. Verify this manual signature using the *correct, given* salt length. - manualVerify(6, t, p, input, sig, saltedOptions) - - // 3.3. Verify this manual signature using the *inferred* salt length. - autoVerify(6, t, p, input, sig, unsaltedOptions) - } - } - - rsaKeyTypes := []KeyType{KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096} - testKeys, err := generateTestKeys() - if err != nil { - t.Fatalf("error generating test keys: %s", err) - } - - // 1. For each standard RSA key size 2048, 3072, and 4096... 
- for _, rsaKeyType := range rsaKeyTypes { - t.Log("Key size: ", rsaKeyType) - p := &Policy{ - Name: fmt.Sprint(rsaKeyType), // NOTE: crucial to create a new key per key size - Type: rsaKeyType, - } - - rsaKeyBytes := testKeys[rsaKeyType] - err := p.Import(ctx, storage, rsaKeyBytes, rand.Reader) - if err != nil { - t.Fatal(tabs[1], "❌ Failed to import key:", err) - } - rsaKeyAny, err := x509.ParsePKCS8PrivateKey(rsaKeyBytes) - if err != nil { - t.Fatalf("error parsing test keys: %s", err) - } - rsaKey := rsaKeyAny.(*rsa.PrivateKey) - - // 2. For each hash algorithm... - for hashAlgorithm, hashType := range HashTypeMap { - t.Log(tabs[1], "Hash algorithm:", hashAlgorithm) - - // 3. For each marshaling type... - for marshalingName, marshalingType := range MarshalingTypeMap { - t.Log(tabs[2], "Marshaling type:", marshalingName) - testName := fmt.Sprintf("%s-%s-%s", rsaKeyType, hashAlgorithm, marshalingName) - t.Run(testName, func(t *testing.T) { test_RSA_PSS(t, p, rsaKey, hashType, marshalingType) }) - } - } - } -} - -// Normal Go builds support all the hash functions for RSA_PSS signatures but the -// FIPS Go build does not support at this time the SHA3 hashes as FIPS 140_2 does -// not accept them. -func isUnsupportedGoHashType(hashType HashType, err error) bool { - switch hashType { - case HashTypeSHA3224, HashTypeSHA3256, HashTypeSHA3384, HashTypeSHA3512: - return strings.Contains(err.Error(), "unsupported hash function") - } - - return false -} diff --git a/sdk/helper/keysutil/util.go b/sdk/helper/keysutil/util.go deleted file mode 100644 index 063af5914672d..0000000000000 --- a/sdk/helper/keysutil/util.go +++ /dev/null @@ -1,115 +0,0 @@ -package keysutil - -import ( - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - - "golang.org/x/crypto/ed25519" -) - -// pkcs8 reflects an ASN.1, PKCS #8 PrivateKey. See -// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn -// and RFC 5208. 
-// -// Copied from Go: https://github.com/golang/go/blob/master/src/crypto/x509/pkcs8.go#L17-L80 -type pkcs8 struct { - Version int - Algo pkix.AlgorithmIdentifier - PrivateKey []byte - // optional attributes omitted. -} - -// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure. -// References: -// -// RFC 5915 -// SEC1 - http://www.secg.org/sec1-v2.pdf -// -// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in -// most cases it is not. -// -// Copied from Go: https://github.com/golang/go/blob/master/src/crypto/x509/sec1.go#L18-L31 -type ecPrivateKey struct { - Version int - PrivateKey []byte - NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` - - // Because the PKCS8/RFC 5915 encoding of the Ed25519 key uses the - // RFC 8032 Ed25519 seed format, we can ignore the public key parameter - // and infer it later. -} - -var ( - // See crypto/x509/x509.go in the Go toolchain source distribution. - oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} - - // NSS encodes Ed25519 private keys with the OID 1.3.6.1.4.1.11591.15.1 - // from https://tools.ietf.org/html/draft-josefsson-pkix-newcurves-01. - // See https://github.com/nss-dev/nss/blob/NSS_3_79_BRANCH/lib/util/secoid.c#L600-L603. - oidNSSPKIXEd25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1} - - // Other implementations may use the OID 1.3.101.110 from - // https://datatracker.ietf.org/doc/html/rfc8410. - oidRFC8410Ed25519 = asn1.ObjectIdentifier{1, 3, 101, 110} -) - -func isEd25519OID(oid asn1.ObjectIdentifier) bool { - return oidNSSPKIXEd25519.Equal(oid) || oidRFC8410Ed25519.Equal(oid) -} - -// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. -// -// It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or a ed25519.PrivateKey. -// More types might be supported in the future. -// -// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". 
-func ParsePKCS8Ed25519PrivateKey(der []byte) (key interface{}, err error) { - var privKey pkcs8 - var ed25519Key ecPrivateKey - - var checkedOID bool - - // If this err is nil, we assume we directly have a ECPrivateKey structure - // with explicit OID; ignore this error for now and return the latter err - // instead if neither parse correctly. - if _, err := asn1.Unmarshal(der, &privKey); err == nil { - switch { - case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA): - bytes := privKey.Algo.Parameters.FullBytes - namedCurveOID := new(asn1.ObjectIdentifier) - if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { - namedCurveOID = nil - } - - if namedCurveOID == nil || !isEd25519OID(*namedCurveOID) { - return nil, errors.New("keysutil: failed to parse private key (invalid, non-ed25519 curve parameter OID)") - } - - der = privKey.PrivateKey - checkedOID = true - default: - // The Go standard library already parses RFC 8410 keys; the - // inclusion of the OID here is in case it is used with the - // regular ECDSA PrivateKey structure, rather than the struct - // recognized by the Go standard library. 
- return nil, errors.New("keysutil: failed to parse key as ed25519 private key") - } - } - - _, err = asn1.Unmarshal(der, &ed25519Key) - if err != nil { - return nil, fmt.Errorf("keysutil: failed to parse private key (inner Ed25519 ECPrivateKey format was incorrect): %v", err) - } - - if !checkedOID && !isEd25519OID(ed25519Key.NamedCurveOID) { - return nil, errors.New("keysutil: failed to parse private key (invalid, non-ed25519 curve parameter OID)") - } - - if len(ed25519Key.PrivateKey) != 32 { - return nil, fmt.Errorf("keysutil: failed to parse private key as ed25519 private key: got %v bytes but expected %v byte RFC 8032 seed", len(ed25519Key.PrivateKey), ed25519.SeedSize) - } - - return ed25519.NewKeyFromSeed(ed25519Key.PrivateKey), nil -} diff --git a/sdk/helper/locksutil/locks.go b/sdk/helper/locksutil/locks.go index 35ffcf739d9dc..1c8540249379b 100644 --- a/sdk/helper/locksutil/locks.go +++ b/sdk/helper/locksutil/locks.go @@ -25,6 +25,7 @@ type LockEntry struct { // Lock B, Lock A // // Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A +// func CreateLocks() []*LockEntry { ret := make([]*LockEntry, LockCount) for i := range ret { diff --git a/sdk/helper/pathmanager/pathmanager_test.go b/sdk/helper/pathmanager/pathmanager_test.go index 7d6207b625e60..6924a7cbdecba 100644 --- a/sdk/helper/pathmanager/pathmanager_test.go +++ b/sdk/helper/pathmanager/pathmanager_test.go @@ -20,7 +20,7 @@ func TestPathManager(t *testing.T) { for _, path := range paths { if m.HasPath(path) { - t.Fatalf("path should not exist in filtered paths %q", path) + t.Fatalf("path should not exist in filtered paths '%s'", path) } } @@ -34,7 +34,7 @@ func TestPathManager(t *testing.T) { } for _, path := range paths { if !m.HasPath(path) { - t.Fatalf("path should exist in filtered paths %q", path) + t.Fatalf("path should exist in filtered paths '%s'", path) } } @@ -43,7 +43,7 @@ func TestPathManager(t *testing.T) { for _, path := range paths { if 
m.HasPath(path) { - t.Fatalf("path should not exist in filtered paths %q", path) + t.Fatalf("path should not exist in filtered paths '%s'", path) } } } @@ -63,7 +63,7 @@ func TestPathManager_RemovePrefix(t *testing.T) { for _, path := range paths { if m.HasPath(path) { - t.Fatalf("path should not exist in filtered paths %q", path) + t.Fatalf("path should not exist in filtered paths '%s'", path) } } @@ -77,7 +77,7 @@ func TestPathManager_RemovePrefix(t *testing.T) { } for _, path := range paths { if !m.HasPath(path) { - t.Fatalf("path should exist in filtered paths %q", path) + t.Fatalf("path should exist in filtered paths '%s'", path) } } @@ -90,7 +90,7 @@ func TestPathManager_RemovePrefix(t *testing.T) { for _, path := range paths { if m.HasPath(path) { - t.Fatalf("path should not exist in filtered paths %q", path) + t.Fatalf("path should not exist in filtered paths '%s'", path) } } } diff --git a/sdk/helper/pluginutil/env.go b/sdk/helper/pluginutil/env.go index df1fdbeede939..fd0cd4fb8308a 100644 --- a/sdk/helper/pluginutil/env.go +++ b/sdk/helper/pluginutil/env.go @@ -7,11 +7,7 @@ import ( version "github.com/hashicorp/go-version" ) -const ( - // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override - // setting a TLSProviderFunc for a plugin. - PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" - +var ( // PluginMlockEnabled is the ENV name used to pass the configuration for // enabling mlock PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" @@ -31,10 +27,6 @@ const ( // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded // string. Used for testing. 
PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" - - // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names - // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues - PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" ) // OptionallyEnableMlock determines if mlock should be called, and if so enables diff --git a/sdk/helper/pluginutil/multiplexing.go b/sdk/helper/pluginutil/multiplexing.go index 41316ec49df28..cbf50335d0bff 100644 --- a/sdk/helper/pluginutil/multiplexing.go +++ b/sdk/helper/pluginutil/multiplexing.go @@ -1,43 +1,31 @@ package pluginutil import ( - "context" - "errors" + context "context" "fmt" - "os" - "strings" - "github.com/hashicorp/go-secure-stdlib/strutil" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" ) -var ErrNoMultiplexingIDFound = errors.New("no multiplexing ID found") - type PluginMultiplexingServerImpl struct { UnimplementedPluginMultiplexingServer Supported bool } -func (pm PluginMultiplexingServerImpl) MultiplexingSupport(_ context.Context, _ *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { +func (pm PluginMultiplexingServerImpl) MultiplexingSupport(ctx context.Context, req *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { return &MultiplexingSupportResponse{ Supported: pm.Supported, }, nil } -func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, name string) (bool, error) { +func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface) (bool, error) { if cc == nil { return false, fmt.Errorf("client connection is nil") } - out := strings.Split(os.Getenv(PluginMultiplexingOptOut), ",") - if strutil.StrListContains(out, name) { - return false, nil - } - req := 
new(MultiplexingSupportRequest) resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req) if err != nil { @@ -57,24 +45,3 @@ func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, nam return resp.Supported, nil } - -func GetMultiplexIDFromContext(ctx context.Context) (string, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", fmt.Errorf("missing plugin multiplexing metadata") - } - - multiplexIDs := md[MultiplexingCtxKey] - if len(multiplexIDs) == 0 { - return "", ErrNoMultiplexingIDFound - } else if len(multiplexIDs) != 1 { - return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) - } - - multiplexID := multiplexIDs[0] - if multiplexID == "" { - return "", fmt.Errorf("empty multiplex ID in metadata") - } - - return multiplexID, nil -} diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go index d7073b10e0a5b..d0ff51e57b242 100644 --- a/sdk/helper/pluginutil/multiplexing.pb.go +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil diff --git a/sdk/helper/pluginutil/multiplexing_test.go b/sdk/helper/pluginutil/multiplexing_test.go deleted file mode 100644 index 125a4a120c624..0000000000000 --- a/sdk/helper/pluginutil/multiplexing_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package pluginutil - -import ( - "context" - "fmt" - "reflect" - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -func TestMultiplexingSupported(t *testing.T) { - type args struct { - ctx context.Context - cc grpc.ClientConnInterface - name string - } - - type testCase struct { - name string - args args - env string - want bool - wantErr bool - } - - tests := []testCase{ - { - name: "multiplexing is supported if plugin is not opted out", - args: args{ - ctx: context.Background(), - cc: &MockClientConnInterfaceNoop{}, - name: "plugin", - }, - env: "", - want: true, - }, - { - name: "multiplexing is not supported if plugin is opted out", - args: args{ - ctx: context.Background(), - cc: &MockClientConnInterfaceNoop{}, - name: "optedOutPlugin", - }, - env: "optedOutPlugin", - want: false, - }, - { - name: "multiplexing is not supported if plugin among one of the opted out", - args: args{ - ctx: context.Background(), - cc: &MockClientConnInterfaceNoop{}, - name: "optedOutPlugin", - }, - env: "firstPlugin,optedOutPlugin,otherPlugin", - want: false, - }, - { - name: "multiplexing is supported if different plugin is opted out", - args: args{ - ctx: context.Background(), - cc: &MockClientConnInterfaceNoop{}, - name: "plugin", - }, - env: "optedOutPlugin", - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Setenv(PluginMultiplexingOptOut, tt.env) - got, err := MultiplexingSupported(tt.args.ctx, tt.args.cc, tt.args.name) - if (err != nil) != tt.wantErr { - t.Errorf("MultiplexingSupported() 
error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("MultiplexingSupported() got = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetMultiplexIDFromContext(t *testing.T) { - type testCase struct { - ctx context.Context - expectedResp string - expectedErr error - } - - tests := map[string]testCase{ - "missing plugin multiplexing metadata": { - ctx: context.Background(), - expectedResp: "", - expectedErr: fmt.Errorf("missing plugin multiplexing metadata"), - }, - "unexpected number of IDs in metadata": { - ctx: idCtx(t, "12345", "67891"), - expectedResp: "", - expectedErr: fmt.Errorf("unexpected number of IDs in metadata: (2)"), - }, - "empty multiplex ID in metadata": { - ctx: idCtx(t, ""), - expectedResp: "", - expectedErr: fmt.Errorf("empty multiplex ID in metadata"), - }, - "happy path, id is returned from metadata": { - ctx: idCtx(t, "12345"), - expectedResp: "12345", - expectedErr: nil, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - resp, err := GetMultiplexIDFromContext(test.ctx) - - if test.expectedErr != nil && test.expectedErr.Error() != "" && err == nil { - t.Fatalf("err expected, got nil") - } else if !reflect.DeepEqual(err, test.expectedErr) { - t.Fatalf("Actual error: %#v\nExpected error: %#v", err, test.expectedErr) - } - - if test.expectedErr != nil && test.expectedErr.Error() == "" && err != nil { - t.Fatalf("no error expected, got: %s", err) - } - - if !reflect.DeepEqual(resp, test.expectedResp) { - t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) - } - }) - } -} - -// idCtx is a test helper that will return a context with the IDs set in its -// metadata -func idCtx(t *testing.T, ids ...string) context.Context { - // Context doesn't need to timeout since this is just passed through - ctx := context.Background() - md := metadata.MD{} - for _, id := range ids { - md.Append(MultiplexingCtxKey, id) - } - return 
metadata.NewIncomingContext(ctx, md) -} - -type MockClientConnInterfaceNoop struct{} - -func (m *MockClientConnInterfaceNoop) Invoke(_ context.Context, _ string, _ interface{}, reply interface{}, _ ...grpc.CallOption) error { - reply.(*MultiplexingSupportResponse).Supported = true - return nil -} - -func (m *MockClientConnInterfaceNoop) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { - return nil, nil -} diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go index 3eb8fb2b28173..cb804f60d8738 100644 --- a/sdk/helper/pluginutil/run_config.go +++ b/sdk/helper/pluginutil/run_config.go @@ -16,14 +16,12 @@ import ( type PluginClientConfig struct { Name string PluginType consts.PluginType - Version string PluginSets map[int]plugin.PluginSet HandshakeConfig plugin.HandshakeConfig Logger log.Logger IsMetadataMode bool AutoMTLS bool MLock bool - Wrapper RunnerUtil } type runConfig struct { @@ -35,6 +33,8 @@ type runConfig struct { // Initialized with what's in PluginRunner.Env, but can be added to env []string + wrapper RunnerUtil + PluginClientConfig } @@ -43,7 +43,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error cmd.Env = append(cmd.Env, rc.env...) 
// Add the mlock setting to the ENV of the plugin - if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { + if rc.MLock || (rc.wrapper != nil && rc.wrapper.MlockEnabled()) { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) } cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) @@ -54,9 +54,6 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) cmd.Env = append(cmd.Env, metadataEnv) - automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) - cmd.Env = append(cmd.Env, automtlsEnv) - var clientTLSConfig *tls.Config if !rc.AutoMTLS && !rc.IsMetadataMode { // Get a CA TLS Certificate @@ -73,7 +70,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error // Use CA to sign a server cert and wrap the values in a response wrapped // token. - wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) + wrapToken, err := wrapServerConfig(ctx, rc.wrapper, certBytes, key) if err != nil { return nil, err } @@ -123,7 +120,7 @@ func Env(env ...string) RunOpt { func Runner(wrapper RunnerUtil) RunOpt { return func(rc *runConfig) { - rc.Wrapper = wrapper + rc.wrapper = wrapper } } diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index 3c2fef2196c14..f2373fe9b4a5a 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os/exec" + "reflect" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) func TestMakeConfig(t *testing.T) { @@ -78,7 +78,6 @@ func TestMakeConfig(t *testing.T) { "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, 
version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), - fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), }, ), SecureConfig: &plugin.SecureConfig{ @@ -144,7 +143,6 @@ func TestMakeConfig(t *testing.T) { fmt.Sprintf("%s=%t", PluginMlockEnabled, true), fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), - fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, "testtoken"), }, ), @@ -207,7 +205,6 @@ func TestMakeConfig(t *testing.T) { "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), - fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, ), SecureConfig: &plugin.SecureConfig{ @@ -269,7 +266,6 @@ func TestMakeConfig(t *testing.T) { "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), - fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), }, ), SecureConfig: &plugin.SecureConfig{ @@ -294,7 +290,7 @@ func TestMakeConfig(t *testing.T) { Return(test.responseWrapInfo, test.responseWrapInfoErr) mockWrapper.On("MlockEnabled"). 
Return(test.mlockEnabled) - test.rc.Wrapper = mockWrapper + test.rc.wrapper = mockWrapper defer mockWrapper.AssertNumberOfCalls(t, "ResponseWrapData", test.responseWrapInfoTimes) defer mockWrapper.AssertNumberOfCalls(t, "MlockEnabled", test.mlockEnabledTimes) @@ -322,7 +318,9 @@ func TestMakeConfig(t *testing.T) { } config.TLSConfig = nil - require.Equal(t, test.expectedConfig, config) + if !reflect.DeepEqual(config, test.expectedConfig) { + t.Fatalf("Actual config: %#v\nExpected config: %#v", config, test.expectedConfig) + } }) } } diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go index 631c4f3a2f35d..f2822efc10408 100644 --- a/sdk/helper/pluginutil/runner.go +++ b/sdk/helper/pluginutil/runner.go @@ -5,8 +5,7 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-version" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/wrapping" "google.golang.org/grpc" @@ -15,8 +14,7 @@ import ( // Looker defines the plugin Lookup function that looks into the plugin catalog // for available plugins and returns a PluginRunner type Looker interface { - LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*PluginRunner, error) - LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*PluginRunner, error) + LookupPlugin(context.Context, string, consts.PluginType) (*PluginRunner, error) } // RunnerUtil interface defines the functions needed by the runner to wrap the @@ -37,7 +35,6 @@ type LookRunnerUtil interface { type PluginClient interface { Conn() grpc.ClientConnInterface - Reload() error plugin.ClientProtocol } @@ -48,7 +45,6 @@ const MultiplexingCtxKey string = "multiplex_id" type PluginRunner struct { Name string `json:"name" structs:"name"` Type consts.PluginType `json:"type" structs:"type"` - Version string `json:"version" 
structs:"version"` Command string `json:"command" structs:"command"` Args []string `json:"args" structs:"args"` Env []string `json:"env" structs:"env"` @@ -85,20 +81,6 @@ func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, ) } -// VersionedPlugin holds any versioning information stored about a plugin in the -// plugin catalog. -type VersionedPlugin struct { - Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses. - Name string `json:"name"` - Version string `json:"version"` - SHA256 string `json:"sha256,omitempty"` - Builtin bool `json:"builtin"` - DeprecationStatus string `json:"deprecation_status,omitempty"` - - // Pre-parsed semver struct of the Version field - SemanticVersion *version.Version `json:"-"` -} - // CtxCancelIfCanceled takes a context cancel func and a context. If the context is // shutdown the cancelfunc is called. This is useful for merging two cancel // functions. diff --git a/sdk/helper/template/template.go b/sdk/helper/template/template.go index 2918825b978ec..4ced1528faae7 100644 --- a/sdk/helper/template/template.go +++ b/sdk/helper/template/template.go @@ -39,48 +39,37 @@ func Function(name string, f interface{}) Opt { // - random // - Randomly generated characters. This uses the charset specified in RandomCharset. Must include a length. // Example: {{ rand 20 }} -// // - truncate // - Truncates the previous value to the specified length. Must include a maximum length. // Example: {{ .DisplayName | truncate 10 }} -// // - truncate_sha256 // - Truncates the previous value to the specified length. If the original length is greater than the length // specified, the remaining characters will be sha256 hashed and appended to the end. The hash will be only the first 8 characters The maximum length will // be no longer than the length specified. // Example: {{ .DisplayName | truncate_sha256 30 }} -// // - uppercase // - Uppercases the previous value. 
// Example: {{ .RoleName | uppercase }} -// // - lowercase // - Lowercases the previous value. // Example: {{ .DisplayName | lowercase }} -// // - replace // - Performs a string find & replace // Example: {{ .DisplayName | replace - _ }} -// // - sha256 // - SHA256 hashes the previous value. // Example: {{ .DisplayName | sha256 }} -// // - base64 // - base64 encodes the previous value. // Example: {{ .DisplayName | base64 }} -// // - unix_time // - Provides the current unix time in seconds. // Example: {{ unix_time }} -// // - unix_time_millis // - Provides the current unix time in milliseconds. // Example: {{ unix_time_millis }} -// // - timestamp // - Provides the current time. Must include a standard Go format string -// // - uuid // - Generates a UUID // Example: {{ uuid }} diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go index 6c1c4b2c9b903..4b1a36b39826a 100644 --- a/sdk/logical/identity.pb.go +++ b/sdk/logical/identity.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/logical/identity.proto package logical diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go index 601148952f0fb..fb9619ae20d26 100644 --- a/sdk/logical/logical.go +++ b/sdk/logical/logical.go @@ -137,20 +137,3 @@ type Auditor interface { AuditRequest(ctx context.Context, input *LogInput) error AuditResponse(ctx context.Context, input *LogInput) error } - -// Externaler allows us to check if a backend is running externally (i.e., over GRPC) -type Externaler interface { - IsExternal() bool -} - -type PluginVersion struct { - Version string -} - -// PluginVersioner is an optional interface to return version info. 
-type PluginVersioner interface { - // PluginVersion returns the version for the backend - PluginVersion() PluginVersion -} - -var EmptyPluginVersion = PluginVersion{""} diff --git a/sdk/logical/managed_key.go b/sdk/logical/managed_key.go index e892c9cce9483..750459542c214 100644 --- a/sdk/logical/managed_key.go +++ b/sdk/logical/managed_key.go @@ -3,7 +3,6 @@ package logical import ( "context" "crypto" - "crypto/cipher" "io" ) @@ -34,9 +33,8 @@ type ManagedKey interface { } type ( - ManagedKeyConsumer func(context.Context, ManagedKey) error - ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error - ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error + ManagedKeyConsumer func(context.Context, ManagedKey) error + ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error ) type ManagedKeySystemView interface { @@ -53,12 +51,6 @@ type ManagedKeySystemView interface { // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, // with the same semantics as WithManagedKeyByUUID WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error - // WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByName - WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error - // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByUUID - WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error } type ManagedAsymmetricKey interface { @@ -90,8 +82,3 @@ type ManagedSigningKey interface { // as needed so as to use per request contexts. 
GetSigner(context.Context) (crypto.Signer, error) } - -type ManagedEncryptingKey interface { - ManagedKey - GetAEAD(iv []byte) (cipher.AEAD, error) -} diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go index 03be5d3cba056..1fb53f9a79c92 100644 --- a/sdk/logical/plugin.pb.go +++ b/sdk/logical/plugin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/logical/plugin.proto package logical diff --git a/sdk/logical/request.go b/sdk/logical/request.go index d774fd176b4a7..1c400a4cb7795 100644 --- a/sdk/logical/request.go +++ b/sdk/logical/request.go @@ -365,7 +365,6 @@ const ( ListOperation = "list" HelpOperation = "help" AliasLookaheadOperation = "alias-lookahead" - ResolveRoleOperation = "resolve-role" // The operations below are called globally, the path is less relevant. RevokeOperation Operation = "revoke" @@ -378,6 +377,7 @@ type MFACreds map[string][]string // InitializationRequest stores the parameters and context of an Initialize() // call being made to a logical.Backend. type InitializationRequest struct { + // Storage can be used to durably store and retrieve state. 
Storage Storage } diff --git a/sdk/logical/response.go b/sdk/logical/response.go index 19194f524871c..e8276c789ace6 100644 --- a/sdk/logical/response.go +++ b/sdk/logical/response.go @@ -310,12 +310,3 @@ func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) { } var _ WrappingResponseWriter = &StatusHeaderResponseWriter{} - -// ResolveRoleResponse returns a standard response to be returned by functions handling a ResolveRoleOperation -func ResolveRoleResponse(roleName string) (*Response, error) { - return &Response{ - Data: map[string]interface{}{ - "role": roleName, - }, - }, nil -} diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go index 4e5627b1c8805..83b4a951e842e 100644 --- a/sdk/logical/system_view.go +++ b/sdk/logical/system_view.go @@ -54,15 +54,7 @@ type SystemView interface { // LookupPlugin looks into the plugin catalog for a plugin with the given // name. Returns a PluginRunner or an error if a plugin can not be found. - LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) - - // LookupPluginVersion looks into the plugin catalog for a plugin with the given - // name and version. Returns a PluginRunner or an error if a plugin can not be found. - LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error) - - // ListVersionedPlugins returns information about all plugins of a certain - // type in the catalog, including any versioning information stored for them. 
- ListVersionedPlugins(ctx context.Context, pluginType consts.PluginType) ([]pluginutil.VersionedPlugin, error) + LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) // NewPluginClient returns a client for managing the lifecycle of plugin // processes @@ -176,14 +168,6 @@ func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.Plu return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") } -func (d StaticSystemView) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { - return nil, errors.New("LookupPluginVersion is not implemented in StaticSystemView") -} - -func (d StaticSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { - return nil, errors.New("ListVersionedPlugins is not implemented in StaticSystemView") -} - func (d StaticSystemView) MlockEnabled() bool { return d.EnableMlock } diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go deleted file mode 100644 index 7845aeaf5c372..0000000000000 --- a/sdk/logical/version.pb.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: sdk/logical/version.proto - -package logical - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_version_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_version_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. -func (*Empty) Descriptor() ([]byte, []int) { - return file_sdk_logical_version_proto_rawDescGZIP(), []int{0} -} - -// VersionReply is the reply for the Version method. 
-type VersionReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PluginVersion string `protobuf:"bytes,1,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` -} - -func (x *VersionReply) Reset() { - *x = VersionReply{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_version_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VersionReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VersionReply) ProtoMessage() {} - -func (x *VersionReply) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_version_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead. 
-func (*VersionReply) Descriptor() ([]byte, []int) { - return file_sdk_logical_version_proto_rawDescGZIP(), []int{1} -} - -func (x *VersionReply) GetPluginVersion() string { - if x != nil { - return x.PluginVersion - } - return "" -} - -var File_sdk_logical_version_proto protoreflect.FileDescriptor - -var file_sdk_logical_version_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x35, 0x0a, - 0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, - 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_logical_version_proto_rawDescOnce sync.Once - file_sdk_logical_version_proto_rawDescData = file_sdk_logical_version_proto_rawDesc -) - -func file_sdk_logical_version_proto_rawDescGZIP() []byte { - file_sdk_logical_version_proto_rawDescOnce.Do(func() { - 
file_sdk_logical_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_version_proto_rawDescData) - }) - return file_sdk_logical_version_proto_rawDescData -} - -var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sdk_logical_version_proto_goTypes = []interface{}{ - (*Empty)(nil), // 0: logical.Empty - (*VersionReply)(nil), // 1: logical.VersionReply -} -var file_sdk_logical_version_proto_depIdxs = []int32{ - 0, // 0: logical.PluginVersion.Version:input_type -> logical.Empty - 1, // 1: logical.PluginVersion.Version:output_type -> logical.VersionReply - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_sdk_logical_version_proto_init() } -func file_sdk_logical_version_proto_init() { - if File_sdk_logical_version_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_logical_version_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_sdk_logical_version_proto_goTypes, - DependencyIndexes: file_sdk_logical_version_proto_depIdxs, - MessageInfos: 
file_sdk_logical_version_proto_msgTypes, - }.Build() - File_sdk_logical_version_proto = out.File - file_sdk_logical_version_proto_rawDesc = nil - file_sdk_logical_version_proto_goTypes = nil - file_sdk_logical_version_proto_depIdxs = nil -} diff --git a/sdk/logical/version.proto b/sdk/logical/version.proto deleted file mode 100644 index 345051ae9de95..0000000000000 --- a/sdk/logical/version.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; -package logical; - -option go_package = "github.com/hashicorp/vault/sdk/logical"; - -message Empty {} - -// VersionReply is the reply for the Version method. -message VersionReply { - string plugin_version = 1; -} - -// PluginVersion is an optional RPC service implemented by plugins. -service PluginVersion { - // Version returns version information for the plugin. - rpc Version(Empty) returns (VersionReply); -} \ No newline at end of file diff --git a/sdk/logical/version_grpc.pb.go b/sdk/logical/version_grpc.pb.go deleted file mode 100644 index a69e97059978c..0000000000000 --- a/sdk/logical/version_grpc.pb.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package logical - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// PluginVersionClient is the client API for PluginVersion service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type PluginVersionClient interface { - // Version returns version information for the plugin. 
- Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) -} - -type pluginVersionClient struct { - cc grpc.ClientConnInterface -} - -func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { - return &pluginVersionClient{cc} -} - -func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { - out := new(VersionReply) - err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PluginVersionServer is the server API for PluginVersion service. -// All implementations must embed UnimplementedPluginVersionServer -// for forward compatibility -type PluginVersionServer interface { - // Version returns version information for the plugin. - Version(context.Context, *Empty) (*VersionReply, error) - mustEmbedUnimplementedPluginVersionServer() -} - -// UnimplementedPluginVersionServer must be embedded to have forward compatible implementations. -type UnimplementedPluginVersionServer struct { -} - -func (UnimplementedPluginVersionServer) Version(context.Context, *Empty) (*VersionReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} -func (UnimplementedPluginVersionServer) mustEmbedUnimplementedPluginVersionServer() {} - -// UnsafePluginVersionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PluginVersionServer will -// result in compilation errors. 
-type UnsafePluginVersionServer interface { - mustEmbedUnimplementedPluginVersionServer() -} - -func RegisterPluginVersionServer(s grpc.ServiceRegistrar, srv PluginVersionServer) { - s.RegisterService(&PluginVersion_ServiceDesc, srv) -} - -func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PluginVersionServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/logical.PluginVersion/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// PluginVersion_ServiceDesc is the grpc.ServiceDesc for PluginVersion service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PluginVersion_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "logical.PluginVersion", - HandlerType: (*PluginVersionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _PluginVersion_Version_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "sdk/logical/version.proto", -} diff --git a/sdk/physical/entry.go b/sdk/physical/entry.go index 389fe6c81c144..418b0d2ca53a1 100644 --- a/sdk/physical/entry.go +++ b/sdk/physical/entry.go @@ -1,10 +1,5 @@ package physical -import ( - "encoding/hex" - "fmt" -) - // Entry is used to represent data stored by the physical backend type Entry struct { Key string @@ -14,7 +9,3 @@ type Entry struct { // Only used in replication ValueHash []byte } - -func (e *Entry) String() string { - return fmt.Sprintf("Key: %s. SealWrap: %t. Value: %s. 
ValueHash: %s", e.Key, e.SealWrap, hex.EncodeToString(e.Value), hex.EncodeToString(e.ValueHash)) -} diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go index be16b4caa12fc..b366eb84bf567 100644 --- a/sdk/physical/inmem/inmem.go +++ b/sdk/physical/inmem/inmem.go @@ -10,9 +10,10 @@ import ( "sync" "sync/atomic" - "github.com/armon/go-radix" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" + + radix "github.com/armon/go-radix" ) // Verify interfaces are satisfied @@ -26,11 +27,10 @@ var ( ) var ( - PutDisabledError = errors.New("put operations disabled in inmem backend") - GetDisabledError = errors.New("get operations disabled in inmem backend") - DeleteDisabledError = errors.New("delete operations disabled in inmem backend") - ListDisabledError = errors.New("list operations disabled in inmem backend") - GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in inmem backend") + PutDisabledError = errors.New("put operations disabled in inmem backend") + GetDisabledError = errors.New("get operations disabled in inmem backend") + DeleteDisabledError = errors.New("delete operations disabled in inmem backend") + ListDisabledError = errors.New("list operations disabled in inmem backend") ) // InmemBackend is an in-memory only physical backend. It is useful @@ -45,7 +45,6 @@ type InmemBackend struct { failPut *uint32 failDelete *uint32 failList *uint32 - failGetInTxn *uint32 logOps bool maxValueSize int } @@ -74,7 +73,6 @@ func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, erro failPut: new(uint32), failDelete: new(uint32), failList: new(uint32), - failGetInTxn: new(uint32), logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", maxValueSize: maxValueSize, }, nil @@ -102,7 +100,6 @@ func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical. 
failPut: new(uint32), failDelete: new(uint32), failList: new(uint32), - failGetInTxn: new(uint32), logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", maxValueSize: maxValueSize, }, @@ -192,14 +189,6 @@ func (i *InmemBackend) FailGet(fail bool) { atomic.StoreUint32(i.failGet, val) } -func (i *InmemBackend) FailGetInTxn(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(i.failGetInTxn, val) -} - // Delete is used to permanently delete an entry func (i *InmemBackend) Delete(ctx context.Context, key string) error { i.permitPool.Acquire() @@ -291,7 +280,7 @@ func (i *InmemBackend) FailList(fail bool) { atomic.StoreUint32(i.failList, val) } -// Transaction implements the transaction interface +// Implements the transaction interface func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { t.permitPool.Acquire() defer t.permitPool.Release() @@ -299,12 +288,5 @@ func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*phy t.Lock() defer t.Unlock() - failGetInTxn := atomic.LoadUint32(t.failGetInTxn) - for _, t := range txns { - if t.Operation == physical.GetOperation && failGetInTxn != 0 { - return GetInTxnDisabledError - } - } - return physical.GenericTransactionHandler(ctx, t, txns) } diff --git a/sdk/physical/transactions.go b/sdk/physical/transactions.go index a943c6bd95efa..19f0d2cbedeb5 100644 --- a/sdk/physical/transactions.go +++ b/sdk/physical/transactions.go @@ -2,9 +2,8 @@ package physical import ( "context" - "fmt" - "github.com/hashicorp/go-multierror" + multierror "github.com/hashicorp/go-multierror" ) // TxnEntry is an operation that takes atomically as part of @@ -14,10 +13,6 @@ type TxnEntry struct { Entry *Entry } -func (t *TxnEntry) String() string { - return fmt.Sprintf("Operation: %s. Entry: %s", t.Operation, t.Entry) -} - // Transactional is an optional interface for backends that // support doing transactional updates of multiple keys. 
This is // required for some features such as replication. @@ -45,19 +40,6 @@ func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns rollbackStack := make([]*TxnEntry, 0, len(txns)) var dirty bool - // Update all of our GET transaction entries, so we can populate existing values back at the wal layer. - for _, txn := range txns { - if txn.Operation == GetOperation { - entry, err := t.GetInternal(ctx, txn.Entry.Key) - if err != nil { - return err - } - if entry != nil { - txn.Entry.Value = entry.Value - } - } - } - // We walk the transactions in order; each successful operation goes into a // LIFO for rollback if we hit an error along the way TxnWalk: @@ -96,7 +78,6 @@ TxnWalk: dirty = true break TxnWalk } - // Nothing existed so in fact rolling back requires a delete var rollbackEntry *TxnEntry if entry == nil { diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go index 8bfa978008046..82c728732703a 100644 --- a/sdk/plugin/backend.go +++ b/sdk/plugin/backend.go @@ -7,8 +7,7 @@ import ( "google.golang.org/grpc" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/sdk/helper/pluginutil" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" ) @@ -25,45 +24,29 @@ type GRPCBackendPlugin struct { MetadataMode bool Logger log.Logger - MultiplexingSupport bool - // Embeding this will disable the netRPC protocol plugin.NetRPCUnsupportedPlugin } func (b GRPCBackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - server := backendGRPCPluginServer{ - broker: broker, - factory: b.Factory, - instances: make(map[string]backendInstance), - // We pass the logger down into the backend so go-plugin will - // forward logs for us. + pb.RegisterBackendServer(s, &backendGRPCPluginServer{ + broker: broker, + factory: b.Factory, + // We pass the logger down into the backend so go-plugin will forward + // logs for us. 
logger: b.Logger, - } - - if b.MultiplexingSupport { - // Multiplexing is enabled for this plugin, register the server so we - // can tell the client in Vault. - pluginutil.RegisterPluginMultiplexingServer(s, pluginutil.PluginMultiplexingServerImpl{ - Supported: true, - }) - server.multiplexingSupport = true - } - - pb.RegisterBackendServer(s, &server) - logical.RegisterPluginVersionServer(s, &server) + }) return nil } func (b *GRPCBackendPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { ret := &backendGRPCPluginClient{ - client: pb.NewBackendClient(c), - versionClient: logical.NewPluginVersionClient(c), - clientConn: c, - broker: broker, - cleanupCh: make(chan struct{}), - doneCtx: ctx, - metadataMode: b.MetadataMode, + client: pb.NewBackendClient(c), + clientConn: c, + broker: broker, + cleanupCh: make(chan struct{}), + doneCtx: ctx, + metadataMode: b.MetadataMode, } // Create the value and set the type diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index 51cdaf174b319..9ea3c23f8c31c 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -6,14 +6,15 @@ import ( "math" "sync/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) var ( @@ -27,10 +28,9 @@ var _ logical.Backend = &backendGRPCPluginClient{} // backendPluginClient implements logical.Backend and is the // go-plugin client. 
type backendGRPCPluginClient struct { - broker *plugin.GRPCBroker - client pb.BackendClient - versionClient logical.PluginVersionClient - metadataMode bool + broker *plugin.GRPCBroker + client pb.BackendClient + metadataMode bool system logical.SystemView logger log.Logger @@ -280,23 +280,3 @@ func (b *backendGRPCPluginClient) Type() logical.BackendType { return logical.BackendType(reply.Type) } - -func (b *backendGRPCPluginClient) PluginVersion() logical.PluginVersion { - reply, err := b.versionClient.Version(b.doneCtx, &logical.Empty{}) - if err != nil { - if stErr, ok := status.FromError(err); ok { - if stErr.Code() == codes.Unimplemented { - return logical.EmptyPluginVersion - } - } - b.Logger().Warn("Unknown error getting plugin version", "err", err) - return logical.EmptyPluginVersion - } - return logical.PluginVersion{ - Version: reply.GetPluginVersion(), - } -} - -func (b *backendGRPCPluginClient) IsExternal() bool { - return true -} diff --git a/sdk/plugin/grpc_backend_server.go b/sdk/plugin/grpc_backend_server.go index 361f3171f6734..ce9ecdf0633fd 100644 --- a/sdk/plugin/grpc_backend_server.go +++ b/sdk/plugin/grpc_backend_server.go @@ -3,11 +3,9 @@ package plugin import ( "context" "errors" - "fmt" - "sync" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" @@ -16,80 +14,29 @@ import ( var ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode") -// singleImplementationID is the string used to define the instance ID of a -// non-multiplexed plugin -const singleImplementationID string = "single" - -type backendInstance struct { - brokeredClient *grpc.ClientConn - backend logical.Backend -} - type backendGRPCPluginServer struct { pb.UnimplementedBackendServer - logical.UnimplementedPluginVersionServer - broker 
*plugin.GRPCBroker - - instances map[string]backendInstance - instancesLock sync.RWMutex - multiplexingSupport bool + broker *plugin.GRPCBroker + backend logical.Backend factory logical.Factory - logger log.Logger -} - -// getBackendAndBrokeredClientInternal returns the backend and client -// connection but does not hold a lock -func (b *backendGRPCPluginServer) getBackendAndBrokeredClientInternal(ctx context.Context) (logical.Backend, *grpc.ClientConn, error) { - if b.multiplexingSupport { - id, err := pluginutil.GetMultiplexIDFromContext(ctx) - if err != nil { - return nil, nil, err - } - - if inst, ok := b.instances[id]; ok { - return inst.backend, inst.brokeredClient, nil - } - - } - - if singleImpl, ok := b.instances[singleImplementationID]; ok { - return singleImpl.backend, singleImpl.brokeredClient, nil - } - - return nil, nil, fmt.Errorf("no backend instance found") -} + brokeredClient *grpc.ClientConn -// getBackendAndBrokeredClient holds a read lock and returns the backend and -// client connection -func (b *backendGRPCPluginServer) getBackendAndBrokeredClient(ctx context.Context) (logical.Backend, *grpc.ClientConn, error) { - b.instancesLock.RLock() - defer b.instancesLock.RUnlock() - return b.getBackendAndBrokeredClientInternal(ctx) + logger log.Logger } // Setup dials into the plugin's broker to get a shimmed storage, logger, and // system view of the backend. This method also instantiates the underlying // backend through its factory func for the server side of the plugin. 
func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) (*pb.SetupReply, error) { - var err error - id := singleImplementationID - - if b.multiplexingSupport { - id, err = pluginutil.GetMultiplexIDFromContext(ctx) - if err != nil { - return &pb.SetupReply{}, err - } - } - // Dial for storage brokeredClient, err := b.broker.Dial(args.BrokerID) if err != nil { return &pb.SetupReply{}, err } - + b.brokeredClient = brokeredClient storage := newGRPCStorageClient(brokeredClient) sysView := newGRPCSystemView(brokeredClient) @@ -109,23 +56,12 @@ func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) Err: pb.ErrToString(err), }, nil } - - b.instancesLock.Lock() - defer b.instancesLock.Unlock() - b.instances[id] = backendInstance{ - brokeredClient: brokeredClient, - backend: backend, - } + b.backend = backend return &pb.SetupReply{}, nil } func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.HandleRequestArgs) (*pb.HandleRequestReply, error) { - backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &pb.HandleRequestReply{}, err - } - if pluginutil.InMetadataMode() { return &pb.HandleRequestReply{}, ErrServerInMetadataMode } @@ -135,9 +71,9 @@ func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.Ha return &pb.HandleRequestReply{}, err } - logicalReq.Storage = newGRPCStorageClient(brokeredClient) + logicalReq.Storage = newGRPCStorageClient(b.brokeredClient) - resp, respErr := backend.HandleRequest(ctx, logicalReq) + resp, respErr := b.backend.HandleRequest(ctx, logicalReq) pbResp, err := pb.LogicalResponseToProtoResponse(resp) if err != nil { @@ -151,20 +87,15 @@ func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.Ha } func (b *backendGRPCPluginServer) Initialize(ctx context.Context, _ *pb.InitializeArgs) (*pb.InitializeReply, error) { - backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx) - if err 
!= nil { - return &pb.InitializeReply{}, err - } - if pluginutil.InMetadataMode() { return &pb.InitializeReply{}, ErrServerInMetadataMode } req := &logical.InitializationRequest{ - Storage: newGRPCStorageClient(brokeredClient), + Storage: newGRPCStorageClient(b.brokeredClient), } - respErr := backend.Initialize(ctx, req) + respErr := b.backend.Initialize(ctx, req) return &pb.InitializeReply{ Err: pb.ErrToProtoErr(respErr), @@ -172,12 +103,7 @@ func (b *backendGRPCPluginServer) Initialize(ctx context.Context, _ *pb.Initiali } func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Empty) (*pb.SpecialPathsReply, error) { - backend, _, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &pb.SpecialPathsReply{}, err - } - - paths := backend.SpecialPaths() + paths := b.backend.SpecialPaths() if paths == nil { return &pb.SpecialPathsReply{ Paths: nil, @@ -195,11 +121,6 @@ func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Emp } func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx context.Context, args *pb.HandleExistenceCheckArgs) (*pb.HandleExistenceCheckReply, error) { - backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &pb.HandleExistenceCheckReply{}, err - } - if pluginutil.InMetadataMode() { return &pb.HandleExistenceCheckReply{}, ErrServerInMetadataMode } @@ -208,10 +129,9 @@ func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx context.Context, args if err != nil { return &pb.HandleExistenceCheckReply{}, err } + logicalReq.Storage = newGRPCStorageClient(b.brokeredClient) - logicalReq.Storage = newGRPCStorageClient(brokeredClient) - - checkFound, exists, err := backend.HandleExistenceCheck(ctx, logicalReq) + checkFound, exists, err := b.backend.HandleExistenceCheck(ctx, logicalReq) return &pb.HandleExistenceCheckReply{ CheckFound: checkFound, Exists: exists, @@ -220,69 +140,24 @@ func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx 
context.Context, args } func (b *backendGRPCPluginServer) Cleanup(ctx context.Context, _ *pb.Empty) (*pb.Empty, error) { - b.instancesLock.Lock() - defer b.instancesLock.Unlock() - - backend, brokeredClient, err := b.getBackendAndBrokeredClientInternal(ctx) - if err != nil { - return &pb.Empty{}, err - } - - backend.Cleanup(ctx) + b.backend.Cleanup(ctx) // Close rpc clients - brokeredClient.Close() - - if b.multiplexingSupport { - id, err := pluginutil.GetMultiplexIDFromContext(ctx) - if err != nil { - return nil, err - } - delete(b.instances, id) - } else if _, ok := b.instances[singleImplementationID]; ok { - delete(b.instances, singleImplementationID) - } - + b.brokeredClient.Close() return &pb.Empty{}, nil } func (b *backendGRPCPluginServer) InvalidateKey(ctx context.Context, args *pb.InvalidateKeyArgs) (*pb.Empty, error) { - backend, _, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &pb.Empty{}, err - } - if pluginutil.InMetadataMode() { return &pb.Empty{}, ErrServerInMetadataMode } - backend.InvalidateKey(ctx, args.Key) + b.backend.InvalidateKey(ctx, args.Key) return &pb.Empty{}, nil } func (b *backendGRPCPluginServer) Type(ctx context.Context, _ *pb.Empty) (*pb.TypeReply, error) { - backend, _, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &pb.TypeReply{}, err - } - return &pb.TypeReply{ - Type: uint32(backend.Type()), - }, nil -} - -func (b *backendGRPCPluginServer) Version(ctx context.Context, _ *logical.Empty) (*logical.VersionReply, error) { - backend, _, err := b.getBackendAndBrokeredClient(ctx) - if err != nil { - return &logical.VersionReply{}, err - } - - if versioner, ok := backend.(logical.PluginVersioner); ok { - return &logical.VersionReply{ - PluginVersion: versioner.PluginVersion().Version, - }, nil - } - return &logical.VersionReply{ - PluginVersion: "", + Type: uint32(b.backend.Type()), }, nil } diff --git a/sdk/plugin/grpc_backend_test.go b/sdk/plugin/grpc_backend_test.go index 
5ab99fd42a388..e332a9c226bf4 100644 --- a/sdk/plugin/grpc_backend_test.go +++ b/sdk/plugin/grpc_backend_test.go @@ -147,21 +147,6 @@ func TestGRPCBackendPlugin_Initialize(t *testing.T) { } } -func TestGRPCBackendPlugin_Version(t *testing.T) { - b, cleanup := testGRPCBackend(t) - defer cleanup() - - versioner, ok := b.(logical.PluginVersioner) - if !ok { - t.Fatalf("Expected %T to implement logical.PluginVersioner interface", b) - } - - version := versioner.PluginVersion().Version - if version != "mock" { - t.Fatalf("Got version %s, expected 'mock'", version) - } -} - func testGRPCBackend(t *testing.T) (logical.Backend, func()) { // Create a mock provider pluginMap := map[string]gplugin.Plugin{ diff --git a/sdk/plugin/grpc_system.go b/sdk/plugin/grpc_system.go index 3761269c48a00..81bc324bf0fa6 100644 --- a/sdk/plugin/grpc_system.go +++ b/sdk/plugin/grpc_system.go @@ -107,14 +107,6 @@ func (s *gRPCSystemViewClient) LookupPlugin(_ context.Context, _ string, _ const return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend") } -func (s *gRPCSystemViewClient) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { - return nil, fmt.Errorf("cannot call LookupPluginVersion from a plugin backend") -} - -func (s *gRPCSystemViewClient) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { - return nil, fmt.Errorf("cannot call ListVersionedPlugins from a plugin backend") -} - func (s *gRPCSystemViewClient) MlockEnabled() bool { reply, err := s.client.MlockEnabled(context.Background(), &pb.Empty{}) if err != nil { diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go index 8d4de0afa8771..fa97444346250 100644 --- a/sdk/plugin/grpc_system_test.go +++ b/sdk/plugin/grpc_system_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" + "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" 
"github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" ) func TestSystem_GRPC_GRPC_impl(t *testing.T) { diff --git a/sdk/plugin/logger_test.go b/sdk/plugin/logger_test.go index a2b8a80155cde..99c27b15b17c2 100644 --- a/sdk/plugin/logger_test.go +++ b/sdk/plugin/logger_test.go @@ -222,6 +222,7 @@ func (l *deprecatedLoggerClient) Error(msg string, args ...interface{}) error { func (l *deprecatedLoggerClient) Fatal(msg string, args ...interface{}) { // NOOP since it's not actually used within vault + return } func (l *deprecatedLoggerClient) Log(level int, msg string, args []interface{}) { diff --git a/sdk/plugin/middleware.go b/sdk/plugin/middleware.go index 546584ccc7367..04a6f4c5000a6 100644 --- a/sdk/plugin/middleware.go +++ b/sdk/plugin/middleware.go @@ -10,16 +10,16 @@ import ( // backendPluginClient implements logical.Backend and is the // go-plugin client. -type BackendTracingMiddleware struct { +type backendTracingMiddleware struct { logger log.Logger next logical.Backend } // Validate the backendTracingMiddle object satisfies the backend interface -var _ logical.Backend = &BackendTracingMiddleware{} +var _ logical.Backend = &backendTracingMiddleware{} -func (b *BackendTracingMiddleware) Initialize(ctx context.Context, req *logical.InitializationRequest) (err error) { +func (b *backendTracingMiddleware) Initialize(ctx context.Context, req *logical.InitializationRequest) (err error) { defer func(then time.Time) { b.logger.Trace("initialize", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) @@ -28,7 +28,7 @@ func (b *BackendTracingMiddleware) Initialize(ctx context.Context, req *logical. 
return b.next.Initialize(ctx, req) } -func (b *BackendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { +func (b *backendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { defer func(then time.Time) { b.logger.Trace("handle request", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) @@ -37,7 +37,7 @@ func (b *BackendTracingMiddleware) HandleRequest(ctx context.Context, req *logic return b.next.HandleRequest(ctx, req) } -func (b *BackendTracingMiddleware) SpecialPaths() *logical.Paths { +func (b *backendTracingMiddleware) SpecialPaths() *logical.Paths { defer func(then time.Time) { b.logger.Trace("special paths", "status", "finished", "took", time.Since(then)) }(time.Now()) @@ -46,15 +46,15 @@ func (b *BackendTracingMiddleware) SpecialPaths() *logical.Paths { return b.next.SpecialPaths() } -func (b *BackendTracingMiddleware) System() logical.SystemView { +func (b *backendTracingMiddleware) System() logical.SystemView { return b.next.System() } -func (b *BackendTracingMiddleware) Logger() log.Logger { +func (b *backendTracingMiddleware) Logger() log.Logger { return b.next.Logger() } -func (b *BackendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) { +func (b *backendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) { defer func(then time.Time) { b.logger.Trace("handle existence check", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) @@ -63,7 +63,7 @@ func (b *BackendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req return b.next.HandleExistenceCheck(ctx, req) } -func (b *BackendTracingMiddleware) Cleanup(ctx context.Context) { +func (b *backendTracingMiddleware) Cleanup(ctx context.Context) { defer 
func(then time.Time) { b.logger.Trace("cleanup", "status", "finished", "took", time.Since(then)) }(time.Now()) @@ -72,7 +72,7 @@ func (b *BackendTracingMiddleware) Cleanup(ctx context.Context) { b.next.Cleanup(ctx) } -func (b *BackendTracingMiddleware) InvalidateKey(ctx context.Context, key string) { +func (b *backendTracingMiddleware) InvalidateKey(ctx context.Context, key string) { defer func(then time.Time) { b.logger.Trace("invalidate key", "key", key, "status", "finished", "took", time.Since(then)) }(time.Now()) @@ -81,7 +81,7 @@ func (b *BackendTracingMiddleware) InvalidateKey(ctx context.Context, key string b.next.InvalidateKey(ctx, key) } -func (b *BackendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) { +func (b *backendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) { defer func(then time.Time) { b.logger.Trace("setup", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) @@ -90,7 +90,7 @@ func (b *BackendTracingMiddleware) Setup(ctx context.Context, config *logical.Ba return b.next.Setup(ctx, config) } -func (b *BackendTracingMiddleware) Type() logical.BackendType { +func (b *backendTracingMiddleware) Type() logical.BackendType { defer func(then time.Time) { b.logger.Trace("type", "status", "finished", "took", time.Since(then)) }(time.Now()) @@ -98,15 +98,3 @@ func (b *BackendTracingMiddleware) Type() logical.BackendType { b.logger.Trace("type", "status", "started") return b.next.Type() } - -func (b *BackendTracingMiddleware) PluginVersion() logical.PluginVersion { - defer func(then time.Time) { - b.logger.Trace("version", "status", "finished", "took", time.Since(then)) - }(time.Now()) - - b.logger.Trace("version", "status", "started") - if versioner, ok := b.next.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} diff --git a/sdk/plugin/mock/backend.go b/sdk/plugin/mock/backend.go index 
fc840809cf10a..a5e8b5622551a 100644 --- a/sdk/plugin/mock/backend.go +++ b/sdk/plugin/mock/backend.go @@ -59,7 +59,6 @@ func Backend() *backend { BackendType: logical.TypeLogical, } b.internal = "bar" - b.RunningVersion = "mock" return &b } diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index 8b764b5fd01da..dbad4da977ce2 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.4 // source: sdk/plugin/pb/backend.proto package pb diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go index edbffcd6983ac..f4f2d8e18f674 100644 --- a/sdk/plugin/plugin.go +++ b/sdk/plugin/plugin.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "sync" + "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" @@ -17,6 +19,7 @@ import ( // used to cleanly kill the client on Cleanup() type BackendPluginClient struct { client *plugin.Client + sync.Mutex logical.Backend } @@ -28,13 +31,13 @@ func (b *BackendPluginClient) Cleanup(ctx context.Context) { b.client.Kill() } -// NewBackendWithVersion will return an instance of an RPC-based client implementation of the backend for +// NewBackend will return an instance of an RPC-based client implementation of the backend for // external plugins, or a concrete implementation of the backend if it is a builtin backend. // The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether // the plugin should run in metadata mode. 
-func NewBackendWithVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool, version string) (logical.Backend, error) { +func NewBackend(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool) (logical.Backend, error) { // Look for plugin in the plugin catalog - pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, pluginType, version) + pluginRunner, err := sys.LookupPlugin(ctx, pluginName, pluginType) if err != nil { return nil, err } @@ -45,7 +48,7 @@ func NewBackendWithVersion(ctx context.Context, pluginName string, pluginType co // from the pluginRunner. Then cast it to logical.Factory. rawFactory, err := pluginRunner.BuiltinFactory() if err != nil { - return nil, fmt.Errorf("error getting plugin type: %q", err) + return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err) } if factory, ok := rawFactory.(logical.Factory); !ok { @@ -66,14 +69,6 @@ func NewBackendWithVersion(ctx context.Context, pluginName string, pluginType co return backend, nil } -// NewBackend will return an instance of an RPC-based client implementation of the backend for -// external plugins, or a concrete implementation of the backend if it is a builtin backend. -// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether -// the plugin should run in metadata mode. 
-func NewBackend(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool) (logical.Backend, error) { - return NewBackendWithVersion(ctx, pluginName, pluginType, sys, conf, isMetadataMode, "") -} - func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) { // pluginMap is the map of plugins we can dispense. pluginSet := map[int]plugin.PluginSet{ @@ -98,9 +93,9 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne var client *plugin.Client var err error if isMetadataMode { - client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSet, HandshakeConfig, []string{}, namedLogger) + client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger) } else { - client, err = pluginRunner.Run(ctx, sys, pluginSet, HandshakeConfig, []string{}, namedLogger) + client, err = pluginRunner.Run(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger) } if err != nil { return nil, err @@ -122,9 +117,9 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne var transport string // We should have a logical backend type now. This feels like a normal interface // implementation but is in fact over an RPC connection. 
- switch b := raw.(type) { + switch raw.(type) { case *backendGRPCPluginClient: - backend = b + backend = raw.(*backendGRPCPluginClient) transport = "gRPC" default: return nil, errors.New("unsupported plugin client type") @@ -132,7 +127,7 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne // Wrap the backend in a tracing middleware if namedLogger.IsTrace() { - backend = &BackendTracingMiddleware{ + backend = &backendTracingMiddleware{ logger: namedLogger.With("transport", transport), next: backend, } @@ -144,21 +139,21 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne }, nil } -func (b *BackendPluginClient) PluginVersion() logical.PluginVersion { - if versioner, ok := b.Backend.(logical.PluginVersioner); ok { - return versioner.PluginVersion() +// wrapError takes a generic error type and makes it usable with the plugin +// interface. Only errors which have exported fields and have been registered +// with gob can be unwrapped and transported. This checks error types and, if +// none match, wrap the error in a plugin.BasicError. 
+func wrapError(err error) error { + if err == nil { + return nil } - return logical.EmptyPluginVersion -} -func (b *BackendPluginClient) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() + switch err.(type) { + case *plugin.BasicError, + logical.HTTPCodedError, + *logical.StatusBadRequest: + return err } - return true // default to true since this is only used for GRPC plugins -} -var ( - _ logical.PluginVersioner = (*BackendPluginClient)(nil) - _ logical.Externaler = (*BackendPluginClient)(nil) -) + return plugin.NewBasicError(err) +} diff --git a/sdk/plugin/plugin_v5.go b/sdk/plugin/plugin_v5.go deleted file mode 100644 index 2adf020a48eee..0000000000000 --- a/sdk/plugin/plugin_v5.go +++ /dev/null @@ -1,184 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "fmt" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/plugin/pb" -) - -// BackendPluginClientV5 is a wrapper around backendPluginClient -// that also contains its plugin.Client instance. 
It's primarily -// used to cleanly kill the client on Cleanup() -type BackendPluginClientV5 struct { - client pluginutil.PluginClient - - logical.Backend -} - -type ContextKey string - -func (c ContextKey) String() string { - return "plugin" + string(c) -} - -const ContextKeyPluginReload = ContextKey("plugin-reload") - -// Cleanup cleans up the go-plugin client and the plugin catalog -func (b *BackendPluginClientV5) Cleanup(ctx context.Context) { - _, ok := ctx.Value(ContextKeyPluginReload).(string) - if !ok { - b.Backend.Cleanup(ctx) - b.client.Close() - return - } - b.Backend.Cleanup(ctx) - b.client.Reload() -} - -func (b *BackendPluginClientV5) IsExternal() bool { - return true -} - -func (b *BackendPluginClientV5) PluginVersion() logical.PluginVersion { - if versioner, ok := b.Backend.(logical.PluginVersioner); ok { - return versioner.PluginVersion() - } - return logical.EmptyPluginVersion -} - -var ( - _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) - _ logical.Externaler = (*BackendPluginClientV5)(nil) -) - -// NewBackendV5 will return an instance of an RPC-based client implementation of -// the backend for external plugins, or a concrete implementation of the -// backend if it is a builtin backend. The backend is returned as a -// logical.Backend interface. -func NewBackendV5(ctx context.Context, pluginName string, pluginType consts.PluginType, pluginVersion string, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig) (logical.Backend, error) { - // Look for plugin in the plugin catalog - pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, pluginType, pluginVersion) - if err != nil { - return nil, err - } - - var backend logical.Backend - if pluginRunner.Builtin { - // Plugin is builtin so we can retrieve an instance of the interface - // from the pluginRunner. Then cast it to logical.Factory. 
- rawFactory, err := pluginRunner.BuiltinFactory() - if err != nil { - return nil, fmt.Errorf("error getting plugin type: %q", err) - } - - if factory, ok := rawFactory.(logical.Factory); !ok { - return nil, fmt.Errorf("unsupported backend type: %q", pluginName) - } else { - if backend, err = factory(ctx, conf); err != nil { - return nil, err - } - } - } else { - // create a backendPluginClient instance - config := pluginutil.PluginClientConfig{ - Name: pluginName, - PluginSets: PluginSet, - PluginType: pluginType, - Version: pluginVersion, - HandshakeConfig: HandshakeConfig, - Logger: conf.Logger.Named(pluginName), - AutoMTLS: true, - Wrapper: sys, - } - backend, err = NewPluginClientV5(ctx, sys, config) - if err != nil { - return nil, err - } - } - - return backend, nil -} - -// PluginSet is the map of plugins we can dispense. -var PluginSet = map[int]plugin.PluginSet{ - 5: { - "backend": &GRPCBackendPlugin{}, - }, -} - -func Dispense(rpcClient plugin.ClientProtocol, pluginClient pluginutil.PluginClient) (logical.Backend, error) { - // Request the plugin - raw, err := rpcClient.Dispense("backend") - if err != nil { - return nil, err - } - - var backend logical.Backend - // We should have a logical backend type now. This feels like a normal interface - // implementation but is in fact over an RPC connection. 
- switch c := raw.(type) { - case *backendGRPCPluginClient: - // This is an abstraction leak from go-plugin but it is necessary in - // order to enable multiplexing on multiplexed plugins - c.client = pb.NewBackendClient(pluginClient.Conn()) - c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn()) - - backend = c - default: - return nil, errors.New("unsupported plugin client type") - } - - return &BackendPluginClientV5{ - client: pluginClient, - Backend: backend, - }, nil -} - -func NewPluginClientV5(ctx context.Context, sys pluginutil.RunnerUtil, config pluginutil.PluginClientConfig) (logical.Backend, error) { - pluginClient, err := sys.NewPluginClient(ctx, config) - if err != nil { - return nil, err - } - - // Request the plugin - raw, err := pluginClient.Dispense("backend") - if err != nil { - return nil, err - } - - var backend logical.Backend - var transport string - // We should have a logical backend type now. This feels like a normal interface - // implementation but is in fact over an RPC connection. 
- switch c := raw.(type) { - case *backendGRPCPluginClient: - // This is an abstraction leak from go-plugin but it is necessary in - // order to enable multiplexing on multiplexed plugins - c.client = pb.NewBackendClient(pluginClient.Conn()) - c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn()) - - backend = c - transport = "gRPC" - default: - return nil, errors.New("unsupported plugin client type") - } - - // Wrap the backend in a tracing middleware - if config.Logger.IsTrace() { - backend = &BackendTracingMiddleware{ - logger: config.Logger.With("transport", transport), - next: backend, - } - } - - return &BackendPluginClientV5{ - client: pluginClient, - Backend: backend, - }, nil -} diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go index 0da143f769b8b..1119a2dac6456 100644 --- a/sdk/plugin/serve.go +++ b/sdk/plugin/serve.go @@ -55,13 +55,6 @@ func Serve(opts *ServeOpts) error { Logger: logger, }, }, - 5: { - "backend": &GRPCBackendPlugin{ - Factory: opts.BackendFactoryFunc, - MultiplexingSupport: false, - Logger: logger, - }, - }, } err := pluginutil.OptionallyEnableMlock() @@ -70,7 +63,7 @@ func Serve(opts *ServeOpts) error { } serveOpts := &plugin.ServeConfig{ - HandshakeConfig: HandshakeConfig, + HandshakeConfig: handshakeConfig, VersionedPlugins: pluginSets, TLSProvider: opts.TLSProviderFunc, Logger: logger, @@ -88,77 +81,12 @@ func Serve(opts *ServeOpts) error { return nil } -// ServeMultiplex is a helper function used to serve a backend plugin. This -// should be ran on the plugin's main process. -func ServeMultiplex(opts *ServeOpts) error { - logger := opts.Logger - if logger == nil { - logger = log.New(&log.LoggerOptions{ - Level: log.Info, - Output: os.Stderr, - JSONFormat: true, - }) - } - - // pluginMap is the map of plugins we can dispense. - pluginSets := map[int]plugin.PluginSet{ - // Version 3 used to supports both protocols. 
We want to keep it around - // since it's possible old plugins built against this version will still - // work with gRPC. There is currently no difference between version 3 - // and version 4. - 3: { - "backend": &GRPCBackendPlugin{ - Factory: opts.BackendFactoryFunc, - Logger: logger, - }, - }, - 4: { - "backend": &GRPCBackendPlugin{ - Factory: opts.BackendFactoryFunc, - Logger: logger, - }, - }, - 5: { - "backend": &GRPCBackendPlugin{ - Factory: opts.BackendFactoryFunc, - MultiplexingSupport: true, - Logger: logger, - }, - }, - } - - err := pluginutil.OptionallyEnableMlock() - if err != nil { - return err - } - - serveOpts := &plugin.ServeConfig{ - HandshakeConfig: HandshakeConfig, - VersionedPlugins: pluginSets, - Logger: logger, - - // A non-nil value here enables gRPC serving for this plugin... - GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { - opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) - opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) - return plugin.DefaultGRPCServer(opts) - }, - - // TLSProvider is required to support v3 and v4 plugins. - // It will be ignored for v5 which uses AutoMTLS - TLSProvider: opts.TLSProviderFunc, - } - - plugin.Serve(serveOpts) - - return nil -} - // handshakeConfigs are used to just do a basic handshake between // a plugin and host. If the handshake fails, a user friendly error is shown. // This prevents users from executing bad plugins or executing a plugin // directory. It is a UX feature, not a security feature. 
-var HandshakeConfig = plugin.HandshakeConfig{ +var handshakeConfig = plugin.HandshakeConfig{ + ProtocolVersion: 4, MagicCookieKey: "VAULT_BACKEND_PLUGIN", MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20", } diff --git a/sdk/version/version_base.go b/sdk/version/version_base.go index e45626e2cd8a1..4021982e7469c 100644 --- a/sdk/version/version_base.go +++ b/sdk/version/version_base.go @@ -11,7 +11,7 @@ var ( // Whether cgo is enabled or not; set at build time CgoEnabled bool - Version = "1.13.0" - VersionPrerelease = "dev1" + Version = "1.11.4" + VersionPrerelease = "" VersionMetadata = "" ) diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go index 50232a2e573e6..cb1c122775e53 100644 --- a/serviceregistration/kubernetes/testing/testserver.go +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -47,15 +47,15 @@ var ( // the test server. This must be done by the caller to avoid an import // cycle between the client and the testserver. 
Example usage: // -// client.Scheme = testConf.ClientScheme -// client.TokenFile = testConf.PathToTokenFile -// client.RootCAFile = testConf.PathToRootCAFile -// if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { -// t.Fatal(err) -// } -// if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { -// t.Fatal(err) -// } +// client.Scheme = testConf.ClientScheme +// client.TokenFile = testConf.PathToTokenFile +// client.RootCAFile = testConf.PathToRootCAFile +// if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { +// t.Fatal(err) +// } type Conf struct { ClientScheme, PathToTokenFile, PathToRootCAFile, ServiceHost, ServicePort string } diff --git a/tools/semgrep/ci/loopclosure.yml b/tools/semgrep/ci/loopclosure.yml deleted file mode 100644 index 967376127db86..0000000000000 --- a/tools/semgrep/ci/loopclosure.yml +++ /dev/null @@ -1,28 +0,0 @@ -rules: - - id: loopclosure - patterns: - - pattern-inside: | - for $A, $B := range $C { - ... - } - - pattern-inside: | - go func() { - ... - }() - - pattern-not-inside: | - go func(..., $B, ...) { - ... - }(..., $B, ...) - - pattern-not-inside: | - go func() { - ... - for ... { - ... - } - ... - }() - - pattern: $B - message: Loop variable $B used inside goroutine - languages: - - go - severity: WARNING diff --git a/tools/semgrep/replication-has-state.yml b/tools/semgrep/replication-has-state.yml deleted file mode 100644 index 416a59e6af6a2..0000000000000 --- a/tools/semgrep/replication-has-state.yml +++ /dev/null @@ -1,58 +0,0 @@ -rules: - - id: replication-state-should-use-IsPerfSecondary - patterns: - - pattern: | - $CORE.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) - # Not the defining function - - pattern-not-inside: | - func ($CORE *Core) IsPerfSecondary() bool { - ... 
- } - # Not a call to System() - - pattern-not: | - $BACKEND.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) - - pattern-not: | - $IDENTITYSTORE.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) - message: "Consider replacing ReplicationState().HasState(...) with IsPerfSecondary()" - languages: [go] - severity: WARNING - fix: $CORE.IsPerfSecondary() - - - id: replication-state-should-use-IsDrSecondar - patterns: - - pattern: | - $CORE.ReplicationState().HasState(consts.ReplicationDRSecondary) - # Not the defining function - - pattern-not-inside: | - func ($CORE *Core) IsDRSecondary() bool { - ... - } - # Not a call to System() - - pattern-not: | - $BACKEND.System().ReplicationState().HasState(consts.ReplicationDRSecondary) - - pattern-not: | - $IDENTITYSTORE.localNode.ReplicationState().HasState(consts.ReplicationDRSecondary) - message: "Consider replacing ReplicationState().HasState(...) with IsDRSecondary()" - languages: [go] - severity: WARNING - fix: $CORE.IsDRSecondary() - - - id: replication-state-in-handler-op - patterns: - - pattern: | - $B.System().ReplicationState().HasState($STATE) - - pattern-inside: | - func ($T $TYPE) $FUNC($CTX context.Context, $REQ *logical.Request, $D *framework.FieldData) (*logical.Response, error) { - ... - } - message: "Consider using frameworks ForwardPerformance* setting" - languages: [go] - severity: WARNING - - - id: replication-state-bad-logic - patterns: - - pattern: | - b.System().LocalMount() || !b.System().ReplicationState().HasState(<... 
consts.ReplicationPerformanceStandby ...>) - message: "Invalid replication state handling of local mounts" - languages: [go] - severity: ERROR diff --git a/tools/tools.go b/tools/tools.go index 7458113cec221..008536efe187d 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -11,6 +11,7 @@ package tools //go:generate go install golang.org/x/tools/cmd/goimports +//go:generate go install github.com/mitchellh/gox //go:generate go install github.com/client9/misspell/cmd/misspell //go:generate go install mvdan.cc/gofumpt //go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go @@ -19,6 +20,8 @@ package tools import ( _ "golang.org/x/tools/cmd/goimports" + _ "github.com/mitchellh/gox" + _ "github.com/client9/misspell/cmd/misspell" _ "mvdan.cc/gofumpt" diff --git a/ui/.browserslistrc b/ui/.browserslistrc index 4a67162a087df..d6104e66505e9 100644 --- a/ui/.browserslistrc +++ b/ui/.browserslistrc @@ -1,4 +1,3 @@ -last 1 chrome version -last 1 firefox version -last 1 safari version -last 1 edge version +defaults +not IE 11 +maintained node versions diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index 9e626cbc17bba..16b2b22d38bc5 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -13,12 +13,7 @@ module.exports = { }, }, plugins: ['ember'], - extends: [ - 'eslint:recommended', - 'plugin:ember/recommended', - 'plugin:prettier/recommended', - 'plugin:compat/recommended', - ], + extends: ['eslint:recommended', 'plugin:ember/recommended', 'plugin:prettier/recommended'], env: { browser: true, }, diff --git a/ui/README.md b/ui/README.md index c8f224b7e6a56..6bbe9b3368eed 100644 --- a/ui/README.md +++ b/ui/README.md @@ -1,15 +1,15 @@ -**Table of Contents** - +**Table of Contents** + - [Vault UI](#vault-ui) - - [Ember CLI Version Matrix](#ember-cli-version-matrix) - [Prerequisites](#prerequisites) - - [Running a Vault Server](#running-a-vault-server) - [Running / Development](#running--development) - [Code Generators](#code-generators) - [Running Tests](#running-tests) 
+ - [Automated Cross-Browser Testing](#automated-cross-browser-testing) + - [Running Browserstack Locally](#running-browserstack-locally) - [Linting](#linting) - [Building Vault UI into a Vault Binary](#building-vault-ui-into-a-vault-binary) - [Further Reading / Useful Links](#further-reading--useful-links) @@ -106,6 +106,17 @@ acceptance tests then run, proxing requests back to that server. - `yarn run test:oss -s` to keep the test server running after the initial run. - `yarn run test -f="policies"` to filter the tests that are run. `-f` gets passed into [QUnit's `filter` config](https://api.qunitjs.com/config/QUnit.config#qunitconfigfilter-string--default-undefined) +- `yarn run test:browserstack` to run the kv acceptance tests in Browserstack + +#### Automated Cross-Browser Testing + +Vault uses [Browserstack Automate](https://automate.browserstack.com/) to run all the kv acceptance tests on various browsers. You can view the list of browsers we test by viewing `testem.browserstack.js`. + +##### Running Browserstack Locally + +To run the Browserstack tests locally you will need to add your `BROWSERSTACK_USERNAME` and `BROWSERSTACK_ACCESS_KEY` to your environment. Then run `yarn run test:browserstack`. You can view the currently running tests at `localhost:7357` or log in to [Browserstack Automate](https://automate.browserstack.com/) to view a previous build. + +To run the tests locally in a browser other than IE11, swap out `launch_in_ci: ['BS_IE_11']` inside `testem.browserstack.js`. ### Linting @@ -132,3 +143,4 @@ setting `VAULT_UI` environment variable. 
- Development Browser Extensions - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi) - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/) +- [Browserstack Automate](https://automate.browserstack.com/) diff --git a/ui/app/adapters/auth-config/_base.js b/ui/app/adapters/auth-config/_base.js index f89dfb06be346..6baccc9cf73b6 100644 --- a/ui/app/adapters/auth-config/_base.js +++ b/ui/app/adapters/auth-config/_base.js @@ -6,7 +6,8 @@ export default ApplicationAdapter.extend({ pathForType(modelType) { // we want the last part of the path const type = modelType.split('/').pop(); - if (type === 'identity-accesslist' || type === 'roletag-denylist') { + // TODO: Update endpoints from PR#10997 + if (type === 'identity-whitelist' || type === 'roletag-blacklist') { return `tidy/${type}`; } return type; diff --git a/ui/app/adapters/auth-config/aws/identity-accesslist.js b/ui/app/adapters/auth-config/aws/identity-whitelist.js similarity index 100% rename from ui/app/adapters/auth-config/aws/identity-accesslist.js rename to ui/app/adapters/auth-config/aws/identity-whitelist.js diff --git a/ui/app/adapters/auth-config/aws/roletag-denylist.js b/ui/app/adapters/auth-config/aws/roletag-blacklist.js similarity index 100% rename from ui/app/adapters/auth-config/aws/roletag-denylist.js rename to ui/app/adapters/auth-config/aws/roletag-blacklist.js diff --git a/ui/app/adapters/cluster.js b/ui/app/adapters/cluster.js index 3c53106d71261..f8600442640dc 100644 --- a/ui/app/adapters/cluster.js +++ b/ui/app/adapters/cluster.js @@ -107,7 +107,7 @@ export default ApplicationAdapter.extend({ }, authenticate({ backend, data }) { - const { role, jwt, token, password, username, path, nonce } = data; + const { role, jwt, token, password, username, path } = data; const url = this.urlForAuth(backend, username, path); const verb = backend === 'token' ? 
'GET' : 'POST'; let options = { @@ -119,8 +119,6 @@ export default ApplicationAdapter.extend({ }; } else if (backend === 'jwt' || backend === 'oidc') { options.data = { role, jwt }; - } else if (backend === 'okta') { - options.data = { password, nonce }; } else { options.data = token ? { token, password } : { password }; } diff --git a/ui/app/adapters/named-path.js b/ui/app/adapters/named-path.js deleted file mode 100644 index 383fa75b5bf5d..0000000000000 --- a/ui/app/adapters/named-path.js +++ /dev/null @@ -1,76 +0,0 @@ -/** - * base adapter for resources that are saved to a path whose unique identifier is name - * save requests are made to the same endpoint and the resource is either created if not found or updated - * */ -import ApplicationAdapter from './application'; -import { assert } from '@ember/debug'; -export default class NamedPathAdapter extends ApplicationAdapter { - namespace = 'v1'; - saveMethod = 'POST'; // override when extending if PUT is used rather than POST - - _saveRecord(store, { modelName }, snapshot) { - // since the response is empty return the serialized data rather than nothing - const data = store.serializerFor(modelName).serialize(snapshot); - return this.ajax(this.urlForUpdateRecord(snapshot.attr('name'), modelName, snapshot), this.saveMethod, { - data, - }).then(() => data); - } - - // create does not return response similar to PUT request - createRecord() { - let [store, { modelName }, snapshot] = arguments; - let name = snapshot.attr('name'); - // throw error if user attempts to create a record with same name, otherwise POST request silently overrides (updates) the existing model - if (store.hasRecordForId(modelName, name)) { - throw new Error(`A record already exists with the name: ${name}`); - } else { - return this._saveRecord(...arguments); - } - } - - // update uses same endpoint and method as create - updateRecord() { - return this._saveRecord(...arguments); - } - - // if backend does not return name in response Ember Data 
will throw an error for pushing a record with no id - // use the id (name) supplied to findRecord to set property on response data - findRecord(store, type, name) { - return super.findRecord(...arguments).then((resp) => { - if (!resp.data.name) { - resp.data.name = name; - } - return resp; - }); - } - - // GET request with list=true as query param - async query(store, type, query) { - const url = this.urlForQuery(query, type.modelName); - const { paramKey, filterFor, allowed_client_id } = query; - // * 'paramKey' is a string of the param name (model attr) we're filtering for, e.g. 'client_id' - // * 'filterFor' is an array of values to filter for (value type must match the attr type), e.g. array of ID strings - // * 'allowed_client_id' is a valid query param to the /provider endpoint - let queryParams = { list: true, ...(allowed_client_id && { allowed_client_id }) }; - const response = await this.ajax(url, 'GET', { data: queryParams }); - - // filter LIST response only if key_info exists and query includes both 'paramKey' & 'filterFor' - if (filterFor) assert('filterFor must be an array', Array.isArray(filterFor)); - if (response.data.key_info && filterFor && paramKey && !filterFor.includes('*')) { - const data = this.filterListResponse(paramKey, filterFor, response.data.key_info); - return { ...response, data }; - } - return response; - } - - filterListResponse(paramKey, matchValues, key_info) { - const keyInfoAsArray = Object.entries(key_info); - const filtered = keyInfoAsArray.filter((key) => { - const value = key[1]; // value is an object of model attributes - return matchValues.includes(value[paramKey]); - }); - const filteredKeyInfo = Object.fromEntries(filtered); - const filteredKeys = Object.keys(filteredKeyInfo); - return { keys: filteredKeys, key_info: filteredKeyInfo }; - } -} diff --git a/ui/app/adapters/oidc/assignment.js b/ui/app/adapters/oidc/assignment.js deleted file mode 100644 index 0c78f6492471d..0000000000000 --- 
a/ui/app/adapters/oidc/assignment.js +++ /dev/null @@ -1,7 +0,0 @@ -import NamedPathAdapter from '../named-path'; - -export default class OidcAssignmentAdapter extends NamedPathAdapter { - pathForType() { - return 'identity/oidc/assignment'; - } -} diff --git a/ui/app/adapters/oidc/client.js b/ui/app/adapters/oidc/client.js deleted file mode 100644 index 3331b6d7efca7..0000000000000 --- a/ui/app/adapters/oidc/client.js +++ /dev/null @@ -1,7 +0,0 @@ -import NamedPathAdapter from '../named-path'; - -export default class OidcClientAdapter extends NamedPathAdapter { - pathForType() { - return 'identity/oidc/client'; - } -} diff --git a/ui/app/adapters/oidc/key.js b/ui/app/adapters/oidc/key.js deleted file mode 100644 index 0b7561ed023d0..0000000000000 --- a/ui/app/adapters/oidc/key.js +++ /dev/null @@ -1,11 +0,0 @@ -import NamedPathAdapter from '../named-path'; - -export default class OidcKeyAdapter extends NamedPathAdapter { - pathForType() { - return 'identity/oidc/key'; - } - rotate(name, verification_ttl) { - const data = verification_ttl ? 
{ verification_ttl } : {}; - return this.ajax(`${this.urlForUpdateRecord(name, 'oidc/key')}/rotate`, 'POST', { data }); - } -} diff --git a/ui/app/adapters/oidc/provider.js b/ui/app/adapters/oidc/provider.js deleted file mode 100644 index 064e569685a50..0000000000000 --- a/ui/app/adapters/oidc/provider.js +++ /dev/null @@ -1,7 +0,0 @@ -import NamedPathAdapter from '../named-path'; - -export default class OidcProviderAdapter extends NamedPathAdapter { - pathForType() { - return 'identity/oidc/provider'; - } -} diff --git a/ui/app/adapters/oidc/scope.js b/ui/app/adapters/oidc/scope.js deleted file mode 100644 index af69799d87192..0000000000000 --- a/ui/app/adapters/oidc/scope.js +++ /dev/null @@ -1,7 +0,0 @@ -import NamedPathAdapter from '../named-path'; - -export default class OidcScopeAdapter extends NamedPathAdapter { - pathForType() { - return 'identity/oidc/scope'; - } -} diff --git a/ui/app/adapters/pki/cert.js b/ui/app/adapters/pki-certificate.js similarity index 97% rename from ui/app/adapters/pki/cert.js rename to ui/app/adapters/pki-certificate.js index 321c406809bda..6e587cedfeee5 100644 --- a/ui/app/adapters/pki/cert.js +++ b/ui/app/adapters/pki-certificate.js @@ -1,5 +1,5 @@ import { assign } from '@ember/polyfills'; -import Adapter from '../pki'; +import Adapter from './pki'; export default Adapter.extend({ url(role) { diff --git a/ui/app/adapters/pki/pki-config.js b/ui/app/adapters/pki-config.js similarity index 98% rename from ui/app/adapters/pki/pki-config.js rename to ui/app/adapters/pki-config.js index c94d30d1c7dab..efc908929a313 100644 --- a/ui/app/adapters/pki/pki-config.js +++ b/ui/app/adapters/pki-config.js @@ -2,7 +2,7 @@ import AdapterError from '@ember-data/adapter/error'; import { hash, resolve } from 'rsvp'; import { capitalize } from '@ember/string'; import { set } from '@ember/object'; -import ApplicationAdapter from '../application'; +import ApplicationAdapter from './application'; export default ApplicationAdapter.extend({ namespace: 
'v1', diff --git a/ui/app/adapters/pki/pki-certificate-engine.js b/ui/app/adapters/pki/pki-certificate-engine.js deleted file mode 100644 index d4eadb92dddc1..0000000000000 --- a/ui/app/adapters/pki/pki-certificate-engine.js +++ /dev/null @@ -1,3 +0,0 @@ -import PkiCertAdapter from './cert'; - -export default class PkiCertificateEngineAdapter extends PkiCertAdapter {} diff --git a/ui/app/adapters/pki/pki-issuer-engine.js b/ui/app/adapters/pki/pki-issuer-engine.js deleted file mode 100644 index 7f3e21c1ae4ef..0000000000000 --- a/ui/app/adapters/pki/pki-issuer-engine.js +++ /dev/null @@ -1,28 +0,0 @@ -import ApplicationAdapter from '../application'; -import { encodePath } from 'vault/utils/path-encoding-helpers'; - -export default class PkiIssuerEngineAdapter extends ApplicationAdapter { - namespace = 'v1'; - - optionsForQuery(id) { - let data = {}; - if (!id) { - data['list'] = true; - } - return { data }; - } - - urlForQuery(backend, id) { - let url = `${this.buildURL()}/${encodePath(backend)}/issuers`; - if (id) { - url = url + '/' + encodePath(id); - } - return url; - } - - async query(store, type, query) { - const { backend, id } = query; - let response = await this.ajax(this.urlForQuery(backend, id), 'GET', this.optionsForQuery(id)); - return response; - } -} diff --git a/ui/app/adapters/pki/pki-key-engine.js b/ui/app/adapters/pki/pki-key-engine.js deleted file mode 100644 index a5e4665f06455..0000000000000 --- a/ui/app/adapters/pki/pki-key-engine.js +++ /dev/null @@ -1,28 +0,0 @@ -import ApplicationAdapter from '../application'; -import { encodePath } from 'vault/utils/path-encoding-helpers'; - -export default class PkiKeyEngineAdapter extends ApplicationAdapter { - namespace = 'v1'; - - optionsForQuery(id) { - let data = {}; - if (!id) { - data['list'] = true; - } - return { data }; - } - - urlForQuery(backend, id) { - let url = `${this.buildURL()}/${encodePath(backend)}/keys`; - if (id) { - url = url + '/' + encodePath(id); - } - return url; - } - - async 
query(store, type, query) { - const { backend, id } = query; - let response = await this.ajax(this.urlForQuery(backend, id), 'GET', this.optionsForQuery(id)); - return response; - } -} diff --git a/ui/app/adapters/pki/pki-role-engine.js b/ui/app/adapters/pki/pki-role-engine.js deleted file mode 100644 index c756445af9d05..0000000000000 --- a/ui/app/adapters/pki/pki-role-engine.js +++ /dev/null @@ -1,3 +0,0 @@ -import PkiRoleAdapter from './pki-role'; - -export default class PkiRoleEngineAdapter extends PkiRoleAdapter {} diff --git a/ui/app/adapters/pki/pki-role.js b/ui/app/adapters/role-pki.js similarity index 97% rename from ui/app/adapters/pki/pki-role.js rename to ui/app/adapters/role-pki.js index 44f0163c03071..ad40d848b4d2e 100644 --- a/ui/app/adapters/pki/pki-role.js +++ b/ui/app/adapters/role-pki.js @@ -1,5 +1,5 @@ import { assign } from '@ember/polyfills'; -import ApplicationAdapter from '../application'; +import ApplicationAdapter from './application'; import { encodePath } from 'vault/utils/path-encoding-helpers'; export default ApplicationAdapter.extend({ diff --git a/ui/app/app.js b/ui/app/app.js index 1f17e4ce6672e..23f2396edb331 100644 --- a/ui/app/app.js +++ b/ui/app/app.js @@ -48,24 +48,6 @@ export default class App extends Application { }, }, }, - pki: { - dependencies: { - services: [ - 'auth', - 'flash-messages', - 'namespace', - 'path-help', - 'router', - 'store', - 'version', - 'wizard', - 'secret-mount-path', - ], - externalRoutes: { - secrets: 'vault.cluster.secrets.backends', - }, - }, - }, }; } diff --git a/ui/app/components/alert-popup.js b/ui/app/components/alert-popup.js index 14c05c7b30cea..666f26e69c85d 100644 --- a/ui/app/components/alert-popup.js +++ b/ui/app/components/alert-popup.js @@ -1,4 +1,4 @@ -import Component from '@glimmer/component'; +import OuterHTML from './outer-html'; /** * @module AlertPopup @@ -8,21 +8,14 @@ import Component from '@glimmer/component'; * // All properties are passed in from the flashMessage service. 
* ``` * - * @param {string} type=null - The alert type. This comes from the message-types helper. - * @param {string} [message=null] - The alert message. - * @param {function} close=null - The close action which will close the alert. - * @param {boolean} isPreformatted - if true modifies class. + * @param type=null {String} - The alert type. This comes from the message-types helper. + * @param [message=null] {String} - The alert message. + * @param close=null {Func} - The close action which will close the alert. * */ -export default class AlertPopup extends Component { - get type() { - return this.args.type || null; - } - get message() { - return this.args.message || null; - } - get close() { - return this.args.close || null; - } -} +export default OuterHTML.extend({ + type: null, + message: null, + close: null, +}); diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js index eddbb9b27b04b..6d4208ce28c62 100644 --- a/ui/app/components/auth-form.js +++ b/ui/app/components/auth-form.js @@ -18,17 +18,13 @@ const BACKENDS = supportedAuthBackends(); * * @example ```js * // All properties are passed in via query params. - * ``` + * ``` * * @param {string} wrappedToken - The auth method that is currently selected in the dropdown. * @param {object} cluster - The auth method that is currently selected in the dropdown. This corresponds to an Ember Model. * @param {string} namespace- The currently active namespace. * @param {string} selectedAuth - The auth method that is currently selected in the dropdown. - * @param {function} onSuccess - Fired on auth success. - * @param {function} [setOktaNumberChallenge] - Sets whether we are waiting for okta number challenge to be used to sign in. - * @param {boolean} [waitingForOktaNumberChallenge=false] - Determines if we are waiting for the Okta Number Challenge to sign in. - * @param {function} [setCancellingAuth] - Sets whether we are cancelling or not the login authentication for Okta Number Challenge. 
- * @param {boolean} [cancelAuthForOktaNumberChallenge=false] - Determines if we are cancelling the login authentication for the Okta Number Challenge. + * @param {function} onSuccess - Fired on auth success */ const DEFAULTS = { @@ -55,9 +51,6 @@ export default Component.extend(DEFAULTS, { oldNamespace: null, authMethods: BACKENDS, - // number answer for okta number challenge if applicable - oktaNumberChallengeAnswer: null, - didReceiveAttrs() { this._super(...arguments); let { @@ -67,14 +60,8 @@ export default Component.extend(DEFAULTS, { namespace: ns, selectedAuth: newMethod, oldSelectedAuth: oldMethod, - cancelAuthForOktaNumberChallenge: cancelAuth, } = this; - // if we are cancelling the login then we reset the number challenge answer and cancel the current authenticate and polling tasks - if (cancelAuth) { - this.set('oktaNumberChallengeAnswer', null); - this.authenticate.cancelAll(); - this.pollForOktaNumberChallenge.cancelAll(); - } + next(() => { if (!token && (oldNS === null || oldNS !== ns)) { this.fetchMethods.perform(); @@ -232,11 +219,7 @@ export default Component.extend(DEFAULTS, { cluster: { id: clusterId }, } = this; try { - if (backendType === 'okta') { - this.pollForOktaNumberChallenge.perform(data.nonce, data.path); - } else { - this.delayAuthMessageReminder.perform(); - } + this.delayAuthMessageReminder.perform(); const authResponse = yield this.auth.authenticate({ clusterId, backend: backendType, @@ -253,28 +236,6 @@ export default Component.extend(DEFAULTS, { }) ), - pollForOktaNumberChallenge: task(function* (nonce, mount) { - // yield for 1s to wait to see if there is a login error before polling - yield timeout(1000); - if (this.error) { - return; - } - let response = null; - this.setOktaNumberChallenge(true); - this.setCancellingAuth(false); - // keep polling /auth/okta/verify/:nonce API every 1s until a response is given with the correct number for the Okta Number Challenge - while (response === null) { - // when testing, the polling 
loop causes promises to be rejected making acceptance tests fail - // so disable the poll in tests - if (Ember.testing) { - return; - } - yield timeout(1000); - response = yield this.auth.getOktaNumberChallengeAnswer(nonce, mount); - } - this.set('oktaNumberChallengeAnswer', response); - }), - delayAuthMessageReminder: task(function* () { if (Ember.testing) { this.showLoading = true; @@ -314,14 +275,6 @@ export default Component.extend(DEFAULTS, { if (this.customPath || backend.id) { data.path = this.customPath || backend.id; } - // add nonce field for okta backend - if (backend.type === 'okta') { - data.nonce = crypto.randomUUID(); - // add a default path of okta if it doesn't exist to be used for Okta Number Challenge - if (!data.path) { - data.path = 'okta'; - } - } return this.authenticate.unlinked().perform(backend.type, data); }, handleError(e) { @@ -330,9 +283,5 @@ export default Component.extend(DEFAULTS, { error: e ? this.auth.handleError(e) : null, }); }, - returnToLoginFromOktaNumberChallenge() { - this.setOktaNumberChallenge(false); - this.set('oktaNumberChallengeAnswer', null); - }, }, }); diff --git a/ui/app/components/auth-jwt.js b/ui/app/components/auth-jwt.js index 1605aadea6bc4..f5ae2a44fb5a6 100644 --- a/ui/app/components/auth-jwt.js +++ b/ui/app/components/auth-jwt.js @@ -1,6 +1,5 @@ import Ember from 'ember'; import { inject as service } from '@ember/service'; -// ARG NOTE: Once you remove outer-html after glimmerizing you can remove the outer-html component import Component from './outer-html'; import { later } from '@ember/runloop'; import { task, timeout, waitForEvent } from 'ember-concurrency'; diff --git a/ui/app/components/block-error.js b/ui/app/components/block-error.js new file mode 100644 index 0000000000000..96167992d7ced --- /dev/null +++ b/ui/app/components/block-error.js @@ -0,0 +1,2 @@ +import OuterHTML from './outer-html'; +export default OuterHTML.extend(); diff --git a/ui/app/components/pki/config-pki-ca.js 
b/ui/app/components/config-pki-ca.js similarity index 100% rename from ui/app/components/pki/config-pki-ca.js rename to ui/app/components/config-pki-ca.js diff --git a/ui/app/components/pki/config-pki.js b/ui/app/components/config-pki.js similarity index 88% rename from ui/app/components/pki/config-pki.js rename to ui/app/components/config-pki.js index d528cb0af4c1f..a3ac4f76ba9b0 100644 --- a/ui/app/components/pki/config-pki.js +++ b/ui/app/components/config-pki.js @@ -37,10 +37,6 @@ export default Component.extend({ loading: false, actions: { - handleCrlTtl({ enabled, goSafeTimeString }) { - this.config.disable = !enabled; // when TTL enabled, config disable=false - this.config.expiry = goSafeTimeString; - }, save(section) { this.set('loading', true); const config = this.config; diff --git a/ui/app/components/database-role-setting-form.js b/ui/app/components/database-role-setting-form.js index 9107c38b20a1e..01b6170da2bae 100644 --- a/ui/app/components/database-role-setting-form.js +++ b/ui/app/components/database-role-setting-form.js @@ -1,6 +1,3 @@ -import Component from '@glimmer/component'; -import { getStatementFields, getRoleFields } from '../utils/database-helpers'; - /** * @module DatabaseRoleSettingForm * DatabaseRoleSettingForm components are used to handle the role settings section on the database/role form @@ -16,6 +13,9 @@ import { getStatementFields, getRoleFields } from '../utils/database-helpers'; * @param {string} [dbType=default] - type of database, eg 'mongodb-database-plugin' */ +import Component from '@glimmer/component'; +import { getStatementFields, getRoleFields } from '../utils/database-helpers'; + export default class DatabaseRoleSettingForm extends Component { get settingFields() { if (!this.args.roleType) return null; diff --git a/ui/app/components/generate-credentials.js b/ui/app/components/generate-credentials.js index 630c0a0ba0d9a..f71f62ecb11e0 100644 --- a/ui/app/components/generate-credentials.js +++ 
b/ui/app/components/generate-credentials.js @@ -16,7 +16,7 @@ const MODEL_TYPES = { backIsListLink: true, }, 'pki-issue': { - model: 'pki/cert', + model: 'pki-certificate', title: 'Issue Certificate', }, 'pki-sign': { diff --git a/ui/app/components/generated-item-list.js b/ui/app/components/generated-item-list.js index 8e1f8e6a7c6fc..61258ba040ca6 100644 --- a/ui/app/components/generated-item-list.js +++ b/ui/app/components/generated-item-list.js @@ -1,6 +1,5 @@ import { inject as service } from '@ember/service'; -import Component from '@glimmer/component'; -import { action } from '@ember/object'; +import Component from '@ember/component'; import { getOwner } from '@ember/application'; /** @@ -9,30 +8,24 @@ import { getOwner } from '@ember/application'; * * @example * ```js - * + * * ``` * - * @param {class} model=null - The corresponding item model that is being configured. - * @param {string} itemType - The type of item displayed. - * @param {array} paths - Relevant to the link for the LinkTo element. - * @param {class} methodModel - Model for the particular method selected. + * @property model=null {DS.Model} - The corresponding item model that is being configured. 
+ * @property itemType {String} - the type of item displayed + * */ -export default class GeneratedItemList extends Component { - @service router; - @service store; - - get model() { - return this.args.model || null; - } - get itemType() { - return this.args.itemType || null; - } - - @action - refreshItemList() { - let route = getOwner(this).lookup(`route:${this.router.currentRouteName}`); - this.store.clearAllDatasets(); - route.refresh(); - } -} +export default Component.extend({ + model: null, + itemType: null, + router: service(), + store: service(), + actions: { + refreshItemList() { + let route = getOwner(this).lookup(`route:${this.router.currentRouteName}`); + this.store.clearAllDatasets(); + route.refresh(); + }, + }, +}); diff --git a/ui/app/components/home-link.js b/ui/app/components/home-link.js index 6d406c0d247d0..8d4273477bd4a 100644 --- a/ui/app/components/home-link.js +++ b/ui/app/components/home-link.js @@ -1,4 +1,5 @@ -import Component from '@glimmer/component'; +import Component from '@ember/component'; +import { computed } from '@ember/object'; /** * @module HomeLink @@ -10,19 +11,19 @@ import Component from '@glimmer/component'; * * * ``` - * @param {string} class - Classes attached to the the component. - * @param {string} text - Text displayed instead of logo. 
* * @see {@link https://github.com/hashicorp/vault/search?l=Handlebars&q=HomeLink|Uses of HomeLink} * @see {@link https://github.com/hashicorp/vault/blob/main/ui/app/components/home-link.js|HomeLink Source Code} */ -export default class HomeLink extends Component { - get text() { +export default Component.extend({ + tagName: '', + + text: computed(function () { return 'home'; - } + }), - get computedClasses() { + computedClasses: computed('classNames', function () { return this.classNames.join(' '); - } -} + }), +}); diff --git a/ui/app/components/hover-copy-button.js b/ui/app/components/hover-copy-button.js index fc2a6705e2760..63a4564ab8464 100644 --- a/ui/app/components/hover-copy-button.js +++ b/ui/app/components/hover-copy-button.js @@ -1,24 +1,11 @@ -import Component from '@glimmer/component'; -import { tracked } from '@glimmer/tracking'; +import Component from '@ember/component'; -/** - * @module HoverCopyButton - * The `HoverCopyButton` is used on dark backgrounds to show a copy button. - * - * @example ```js - * ``` - * - * @param {string} copyValue - The value to be copied. - * @param {boolean} [alwaysShow] - Boolean that affects the class. 
- */ +export default Component.extend({ + 'data-test-hover-copy': true, + attributeBindings: ['data-test-hover-copy'], + classNameBindings: 'alwaysShow:hover-copy-button-static:hover-copy-button', + copyValue: null, + alwaysShow: false, -export default class HoverCopyButton extends Component { - get alwaysShow() { - return this.args.alwaysShow || false; - } - get copyValue() { - return this.args.copyValue || false; - } - - @tracked tooltipText = 'Copy'; -} + tooltipText: 'Copy', +}); diff --git a/ui/app/components/link-status.js b/ui/app/components/link-status.js deleted file mode 100644 index 153d4667ce6a8..0000000000000 --- a/ui/app/components/link-status.js +++ /dev/null @@ -1,74 +0,0 @@ -import Component from '@glimmer/component'; -import { inject as service } from '@ember/service'; - -/** - * @module LinkStatus - * LinkStatus components are used to indicate link status to the hashicorp cloud platform - * - * @example - * ```js - * - * ``` - * - * @param {string} status - cluster.hcpLinkStatus value from currentCluster service -- returned from seal-status endpoint - */ - -export default class LinkStatus extends Component { - @service store; - @service version; - - get state() { - if (!this.args.status) return null; - // connected state is returned with no further information - if (this.args.status === 'connected') return this.args.status; - // disconnected and connecting states are returned with a timestamp and error - // state is always the first word of the string - return this.args.status.split(' ', 1).toString(); - } - - get timestamp() { - try { - return this.state !== 'connected' ? this.args.status.split('since')[1].split('m=')[0].trim() : null; - } catch { - return null; - } - } - - get message() { - if (this.args.status) { - const error = this.args.status.split('error:')[1] || ''; - const timestamp = this.timestamp ? ` [${this.timestamp}]` : ''; - const sinceTimestamp = timestamp ? 
` since${timestamp}` : ''; - if (this.state === 'disconnected') { - // if generally disconnected hide the banner - return !error || error.includes('UNKNOWN') - ? null - : `Vault has been disconnected from HCP${sinceTimestamp}. Error: ${error}`; - } else if (this.state === 'connecting') { - if (error.includes('connection refused')) { - return `Vault has been trying to connect to HCP${sinceTimestamp}, but HCP is not reachable. Vault will try again soon.`; - } else if (error.includes('principal does not have permission to register as provider')) { - return `Vault tried connecting to HCP, but the Resource ID is invalid. Check your resource ID.${timestamp}`; - } else if (error.includes('cannot fetch token: 401 Unauthorized')) { - return `Vault tried connecting to HCP, but the authorization information is wrong. Update it and try again.${timestamp}`; - } else { - // catch all for any unknown errors or missing error - const errorMessage = error ? ` Error: ${error}` : ''; - return `Vault has been trying to connect to HCP${sinceTimestamp}. 
Vault will try again soon.${errorMessage}`; - } - } - } - return null; - } - - get showStatus() { - // enterprise only feature at this time but will expand to OSS in future release - if (!this.version.isEnterprise || !this.args.status) { - return false; - } - if (this.state !== 'connected' && !this.message) { - return false; - } - return true; - } -} diff --git a/ui/app/components/logo-splash.js b/ui/app/components/logo-splash.js new file mode 100644 index 0000000000000..0fef514611cae --- /dev/null +++ b/ui/app/components/logo-splash.js @@ -0,0 +1 @@ +export { default } from './outer-html'; diff --git a/ui/app/components/mfa/mfa-form.js b/ui/app/components/mfa-form.js similarity index 98% rename from ui/app/components/mfa/mfa-form.js rename to ui/app/components/mfa-form.js index c73fb1197a05c..e83e835a48726 100644 --- a/ui/app/components/mfa/mfa-form.js +++ b/ui/app/components/mfa-form.js @@ -10,7 +10,7 @@ import { numberToWord } from 'vault/helpers/number-to-word'; * * @example * ```js - * + * * ``` * @param {string} clusterId - id of selected cluster * @param {object} authData - data from initial auth request -- { mfa_requirement, backend, data } diff --git a/ui/app/components/mfa/mfa-login-enforcement-form.js b/ui/app/components/mfa-login-enforcement-form.js similarity index 90% rename from ui/app/components/mfa/mfa-login-enforcement-form.js rename to ui/app/components/mfa-login-enforcement-form.js index 250019be60d9e..ef130f0aff314 100644 --- a/ui/app/components/mfa/mfa-login-enforcement-form.js +++ b/ui/app/components/mfa-login-enforcement-form.js @@ -3,7 +3,6 @@ import { tracked } from '@glimmer/tracking'; import { action } from '@ember/object'; import { inject as service } from '@ember/service'; import { task } from 'ember-concurrency'; -import handleHasManySelection from 'core/utils/search-select-has-many'; /** * @module MfaLoginEnforcementForm @@ -116,9 +115,21 @@ export default class MfaLoginEnforcementForm extends Component { @action async 
onMethodChange(selectedIds) { const methods = await this.args.model.mfa_methods; - handleHasManySelection(selectedIds, methods, this.store, 'mfa-method'); + // first check for existing methods that have been removed from selection + methods.forEach((method) => { + if (!selectedIds.includes(method.id)) { + methods.removeObject(method); + } + }); + // now check for selected items that don't exist and add them to the model + const methodIds = methods.mapBy('id'); + selectedIds.forEach((id) => { + if (!methodIds.includes(id)) { + const model = this.store.peekRecord('mfa-method', id); + methods.addObject(model); + } + }); } - @action onTargetSelect(type) { this.selectedTargetType = type; diff --git a/ui/app/components/mfa/mfa-login-enforcement-header.js b/ui/app/components/mfa-login-enforcement-header.js similarity index 100% rename from ui/app/components/mfa/mfa-login-enforcement-header.js rename to ui/app/components/mfa-login-enforcement-header.js diff --git a/ui/app/components/mfa/mfa-setup-step-one.js b/ui/app/components/mfa-setup-step-one.js similarity index 100% rename from ui/app/components/mfa/mfa-setup-step-one.js rename to ui/app/components/mfa-setup-step-one.js diff --git a/ui/app/components/mfa/mfa-setup-step-two.js b/ui/app/components/mfa-setup-step-two.js similarity index 100% rename from ui/app/components/mfa/mfa-setup-step-two.js rename to ui/app/components/mfa-setup-step-two.js diff --git a/ui/app/components/mount-backend-form.js b/ui/app/components/mount-backend-form.js index 63a26c84c3cdb..263981141892d 100644 --- a/ui/app/components/mount-backend-form.js +++ b/ui/app/components/mount-backend-form.js @@ -1,74 +1,80 @@ import Ember from 'ember'; -import Component from '@glimmer/component'; -import { tracked } from '@glimmer/tracking'; import { inject as service } from '@ember/service'; -import { action, setProperties } from '@ember/object'; +import { computed } from '@ember/object'; +import Component from '@ember/component'; import { task } from 
'ember-concurrency'; import { methods } from 'vault/helpers/mountable-auth-methods'; import { engines, KMIP, TRANSFORM, KEYMGMT } from 'vault/helpers/mountable-secret-engines'; import { waitFor } from '@ember/test-waiters'; -/** - * @module MountBackendForm - * The `MountBackendForm` is used to mount either a secret or auth backend. - * - * @example ```js - * ``` - * - * @param {function} onMountSuccess - A function that transitions once the Mount has been successfully posted. - * @param {string} [mountType=auth] - The type of backend we want to mount. - * - */ - const METHODS = methods(); const ENGINES = engines(); -export default class MountBackendForm extends Component { - @service store; - @service wizard; - @service flashMessages; - @service version; - - get mountType() { - return this.args.mountType || 'auth'; - } - - @tracked mountModel = null; - @tracked showEnable = false; +export default Component.extend({ + store: service(), + wizard: service(), + flashMessages: service(), + version: service(), + + /* + * @param Function + * @public + * + * Optional param to call a function upon successfully mounting a backend + * + */ + onMountSuccess() {}, + /* + * @param String + * @public + * the type of backend we want to mount + * defaults to `auth` + * + */ + mountType: 'auth', + + /* + * + * @param DS.Model + * @private + * Ember Data model corresponding to the `mountType`. + * Created and set during `init` + * + */ + mountModel: null, + + showEnable: false, // validation related properties - @tracked modelValidations = null; - @tracked invalidFormAlert = null; + modelValidations: null, + invalidFormAlert: null, - @tracked mountIssue = false; + mountIssue: false, - @tracked errors = ''; - @tracked errorMessage = ''; - - constructor() { - super(...arguments); - const type = this.args.mountType || 'auth'; + init() { + this._super(...arguments); + const type = this.mountType; const modelType = type === 'secret' ? 
'secret-engine' : 'auth-method'; const model = this.store.createRecord(modelType); - this.mountModel = model; - } + this.set('mountModel', model); + }, - get mountTypes() { + mountTypes: computed('engines', 'mountType', function () { return this.mountType === 'secret' ? this.engines : METHODS; - } + }), - get engines() { + engines: computed('version.{features[],isEnterprise}', function () { if (this.version.isEnterprise) { return ENGINES.concat([KMIP, TRANSFORM, KEYMGMT]); } return ENGINES; - } + }), willDestroy() { + this._super(...arguments); // if unsaved, we want to unload so it doesn't show up in the auth mount list - super.willDestroy(...arguments); this.mountModel.rollbackAttributes(); - } + }, checkPathChange(type) { let mount = this.mountModel; @@ -78,115 +84,114 @@ export default class MountBackendForm extends Component { // change it here to match the new type let isUnchanged = list.findBy('type', currentPath); if (!currentPath || isUnchanged) { - mount.path = type; + mount.set('path', type); } - } + }, checkModelValidity(model) { const { isValid, state, invalidFormMessage } = model.validate(); - setProperties(this, { + this.setProperties({ modelValidations: state, invalidFormAlert: invalidFormMessage, }); return isValid; - } - - @task - @waitFor - *mountBackend() { - const mountModel = this.mountModel; - const { type, path } = mountModel; - // only submit form if validations pass - if (!this.checkModelValidity(mountModel)) { - return; - } - let capabilities = null; - try { - capabilities = yield this.store.findRecord('capabilities', `${path}/config`); - } catch (err) { - if (Ember.testing) { - //captures mount-backend-form component test - yield mountModel.save(); - let mountType = this.mountType; - mountType = mountType === 'secret' ? 
`${mountType}s engine` : `${mountType} method`; - this.flashMessages.success(`Successfully mounted the ${type} ${mountType} at ${path}.`); - yield this.args.onMountSuccess(type, path); + }, + + mountBackend: task( + waitFor(function* () { + const mountModel = this.mountModel; + const { type, path } = mountModel; + // only submit form if validations pass + if (!this.checkModelValidity(mountModel)) { return; - } else { - throw err; } - } + let capabilities = null; + try { + capabilities = yield this.store.findRecord('capabilities', `${path}/config`); + } catch (err) { + if (Ember.testing) { + //captures mount-backend-form component test + yield mountModel.save(); + let mountType = this.mountType; + mountType = mountType === 'secret' ? `${mountType}s engine` : `${mountType} method`; + this.flashMessages.success(`Successfully mounted the ${type} ${mountType} at ${path}.`); + yield this.onMountSuccess(type, path); + return; + } else { + throw err; + } + } - let changedAttrKeys = Object.keys(mountModel.changedAttributes()); - let updatesConfig = - changedAttrKeys.includes('casRequired') || - changedAttrKeys.includes('deleteVersionAfter') || - changedAttrKeys.includes('maxVersions'); - - try { - yield mountModel.save(); - } catch (err) { - if (err.httpStatus === 403) { - this.mountIssue = true; - this.flashMessages.danger( - 'You do not have access to the sys/mounts endpoint. The secret engine was not mounted.' - ); + let changedAttrKeys = Object.keys(mountModel.changedAttributes()); + let updatesConfig = + changedAttrKeys.includes('casRequired') || + changedAttrKeys.includes('deleteVersionAfter') || + changedAttrKeys.includes('maxVersions'); + + try { + yield mountModel.save(); + } catch (err) { + if (err.httpStatus === 403) { + this.mountIssue = true; + this.flashMessages.danger( + 'You do not have access to the sys/mounts endpoint. The secret engine was not mounted.' 
+ ); + return; + } + if (err.errors) { + let errors = err.errors.map((e) => { + if (typeof e === 'object') return e.title || e.message || JSON.stringify(e); + return e; + }); + this.set('errors', errors); + } else if (err.message) { + this.set('errorMessage', err.message); + } else { + this.set('errorMessage', 'An error occurred, check the vault logs.'); + } return; } - if (err.errors) { - let errors = err.errors.map((e) => { - if (typeof e === 'object') return e.title || e.message || JSON.stringify(e); - return e; - }); - this.errors = errors; - } else if (err.message) { - this.errorMessage = err.message; - } else { - this.errorMessage = 'An error occurred, check the vault logs.'; + // mountModel must be after the save + if (mountModel.isV2KV && updatesConfig && !capabilities.get('canUpdate')) { + // config error is not thrown from secret-engine adapter, so handling here + this.flashMessages.warning( + 'You do not have access to the config endpoint. The secret engine was mounted, but the configuration settings were not saved.' + ); + // remove the config data from the model otherwise it will save it even if the network request failed. + [this.mountModel.maxVersions, this.mountModel.casRequired, this.mountModel.deleteVersionAfter] = [ + 0, + false, + 0, + ]; } + let mountType = this.mountType; + mountType = mountType === 'secret' ? `${mountType}s engine` : `${mountType} method`; + this.flashMessages.success(`Successfully mounted the ${type} ${mountType} at ${path}.`); + yield this.onMountSuccess(type, path); return; - } - // mountModel must be after the save - if (mountModel.isV2KV && updatesConfig && !capabilities.get('canUpdate')) { - // config error is not thrown from secret-engine adapter, so handling here - this.flashMessages.warning( - 'You do not have access to the config endpoint. The secret engine was mounted, but the configuration settings were not saved.' 
- ); - // remove the config data from the model otherwise it will save it even if the network request failed. - [this.mountModel.maxVersions, this.mountModel.casRequired, this.mountModel.deleteVersionAfter] = [ - 0, - false, - 0, - ]; - } - let mountType = this.mountType; - mountType = mountType === 'secret' ? `${mountType}s engine` : `${mountType} method`; - this.flashMessages.success(`Successfully mounted the ${type} ${mountType} at ${path}.`); - yield this.args.onMountSuccess(type, path); - return; - } - - @action - onKeyUp(name, value) { - this.mountModel.set(name, value); - } - - @action - onTypeChange(path, value) { - if (path === 'type') { - this.wizard.set('componentState', value); - this.checkPathChange(value); - } - } - - @action - toggleShowEnable(value) { - this.showEnable = value; - if (value === true && this.wizard.featureState === 'idle') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.mountModel.type); - } else { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'RESET', this.mountModel.type); - } - } -} + }) + ).drop(), + + actions: { + onKeyUp(name, value) { + this.mountModel.set(name, value); + }, + + onTypeChange(path, value) { + if (path === 'type') { + this.wizard.set('componentState', value); + this.checkPathChange(value); + } + }, + + toggleShowEnable(value) { + this.set('showEnable', value); + if (value === true && this.wizard.featureState === 'idle') { + this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.mountModel.type); + } else { + this.wizard.transitionFeatureMachine(this.wizard.featureState, 'RESET', this.mountModel.type); + } + }, + }, +}); diff --git a/ui/app/components/nav-header.js b/ui/app/components/nav-header.js index 7f67b8f2582a0..7697c6fc1a0bc 100644 --- a/ui/app/components/nav-header.js +++ b/ui/app/components/nav-header.js @@ -4,7 +4,6 @@ import { computed } from '@ember/object'; export default Component.extend({ router: service(), - 
currentCluster: service(), 'data-test-navheader': true, attributeBindings: ['data-test-navheader'], classNameBindings: 'consoleFullscreen:panel-fullscreen', diff --git a/ui/app/components/oidc/assignment-form.js b/ui/app/components/oidc/assignment-form.js deleted file mode 100644 index d521bce93cc1e..0000000000000 --- a/ui/app/components/oidc/assignment-form.js +++ /dev/null @@ -1,72 +0,0 @@ -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; -import { task } from 'ember-concurrency'; -import { tracked } from '@glimmer/tracking'; - -/** - * @module Oidc::AssignmentForm - * Oidc::AssignmentForm components are used to display the create view for OIDC providers assignments. - * - * @example - * ```js - * - * ``` - * @callback onCancel - * @callback onSave - * @param {object} model - The parent's model - * @param {string} onCancel - callback triggered when cancel button is clicked - * @param {string} onSave - callback triggered when save button is clicked - */ - -export default class OidcAssignmentFormComponent extends Component { - @service store; - @service flashMessages; - @tracked modelValidations; - @tracked errorBanner; - - @task - *save(event) { - event.preventDefault(); - try { - const { isValid, state } = this.args.model.validate(); - this.modelValidations = isValid ? null : state; - if (isValid) { - const { isNew, name } = this.args.model; - yield this.args.model.save(); - this.flashMessages.success(`Successfully ${isNew ? 'created' : 'updated'} the assignment ${name}.`); - // this form is sometimes used in modal, passing the model notifies - // the parent if the save was successful - this.args.onSave(this.args.model); - } - } catch (error) { - const message = error.errors ? error.errors.join('. ') : error.message; - this.errorBanner = message; - } - } - - @action - cancel() { - const method = this.args.model.isNew ? 
'unloadRecord' : 'rollbackAttributes'; - this.args.model[method](); - this.args.onCancel(); - } - - @action - handleOperation({ target }) { - this.args.model.name = target.value; - } - - @action - onEntitiesSelect(selectedIds) { - this.args.model.entityIds = selectedIds; - } - - @action - onGroupsSelect(selectedIds) { - this.args.model.groupIds = selectedIds; - } -} diff --git a/ui/app/components/oidc/client-form.js b/ui/app/components/oidc/client-form.js deleted file mode 100644 index 2de6cf92a04c7..0000000000000 --- a/ui/app/components/oidc/client-form.js +++ /dev/null @@ -1,91 +0,0 @@ -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; -import { task } from 'ember-concurrency'; -/** - * @module OidcClientForm - * OidcClientForm components are used to create and update OIDC clients (a.k.a. applications) - * - * @example - * ```js - * - * ``` - * @callback onCancel - * @callback onSave - * @param {Object} model - oidc client model - * @param {onCancel} onCancel - callback triggered when cancel button is clicked - * @param {onSave} onSave - callback triggered on save success - */ - -export default class OidcClientForm extends Component { - @service store; - @service flashMessages; - @tracked modelValidations; - @tracked errorBanner; - @tracked invalidFormAlert; - @tracked radioCardGroupValue = - !this.args.model.assignments || this.args.model.assignments.includes('allow_all') - ? 
'allow_all' - : 'limited'; - - get modelAssignments() { - const { assignments } = this.args.model; - if (assignments.includes('allow_all') && assignments.length === 1) { - return []; - } else { - return assignments; - } - } - - @action - handleAssignmentSelection(selection) { - // if array then coming from search-select component, set selection as model assignments - if (Array.isArray(selection)) { - this.args.model.assignments = selection; - } else { - // otherwise update radio button value and reset assignments so - // UI always reflects a user's selection (including when no assignments are selected) - this.radioCardGroupValue = selection; - this.args.model.assignments = []; - } - } - - @action - cancel() { - const method = this.args.model.isNew ? 'unloadRecord' : 'rollbackAttributes'; - this.args.model[method](); - this.args.onCancel(); - } - - @task - *save(event) { - event.preventDefault(); - try { - const { isValid, state, invalidFormMessage } = this.args.model.validate(); - this.modelValidations = isValid ? null : state; - this.invalidFormAlert = invalidFormMessage; - if (isValid) { - if (this.radioCardGroupValue === 'allow_all') { - // the backend permits 'allow_all' AND other assignments, though 'allow_all' will take precedence - // the UI limits the config by allowing either 'allow_all' OR a list of other assignments - // note: when editing the UI removes any additional assignments previously configured via CLI - this.args.model.assignments = ['allow_all']; - } - // if TTL components are toggled off, set to default lease duration - const { idTokenTtl, accessTokenTtl } = this.args.model; - // value returned from API is a number, and string when from form action - if (Number(idTokenTtl) === 0) this.args.model.idTokenTtl = '24h'; - if (Number(accessTokenTtl) === 0) this.args.model.accessTokenTtl = '24h'; - const { isNew, name } = this.args.model; - yield this.args.model.save(); - this.flashMessages.success(`Successfully ${isNew ? 
'created' : 'updated'} the application ${name}.`); - this.args.onSave(); - } - } catch (error) { - const message = error.errors ? error.errors.join('. ') : error.message; - this.errorBanner = message; - this.invalidFormAlert = 'There was an error submitting this form.'; - } - } -} diff --git a/ui/app/components/oidc/key-form.js b/ui/app/components/oidc/key-form.js deleted file mode 100644 index 3fe518cf0608f..0000000000000 --- a/ui/app/components/oidc/key-form.js +++ /dev/null @@ -1,89 +0,0 @@ -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; -import { task } from 'ember-concurrency'; - -/** - * @module OidcKeyForm - * OidcKeyForm components are used to create and update OIDC providers - * - * @example - * ```js - * - * ``` - * @callback onCancel - * @callback onSave - * @param {Object} model - oidc client model - * @param {onCancel} onCancel - callback triggered when cancel button is clicked - * @param {onSave} onSave - callback triggered on save success - */ - -export default class OidcKeyForm extends Component { - @service store; - @service flashMessages; - @tracked errorBanner; - @tracked invalidFormAlert; - @tracked modelValidations; - @tracked radioCardGroupValue = - // If "*" is provided, all clients are allowed: https://www.vaultproject.io/api-docs/secret/identity/oidc-provider#parameters - !this.args.model.allowedClientIds || this.args.model.allowedClientIds.includes('*') - ? 
'allow_all' - : 'limited'; - - get filterDropdownOptions() { - // query object sent to search-select so only clients that reference this key appear in dropdown - return { paramKey: 'key', filterFor: [this.args.model.name] }; - } - - @action - handleClientSelection(selection) { - // if array then coming from search-select component, set selection as model clients - if (Array.isArray(selection)) { - this.args.model.allowedClientIds = selection.map((client) => client.clientId); - } else { - // otherwise update radio button value and reset clients so - // UI always reflects a user's selection (including when no clients are selected) - this.radioCardGroupValue = selection; - this.args.model.allowedClientIds = []; - } - } - - @action - cancel() { - const method = this.args.model.isNew ? 'unloadRecord' : 'rollbackAttributes'; - this.args.model[method](); - this.args.onCancel(); - } - - @task - *save(event) { - event.preventDefault(); - try { - const { isValid, state, invalidFormMessage } = this.args.model.validate(); - this.modelValidations = isValid ? null : state; - this.invalidFormAlert = invalidFormMessage; - if (isValid) { - const { isNew, name } = this.args.model; - if (this.radioCardGroupValue === 'allow_all') { - this.args.model.allowedClientIds = ['*']; - } - // if TTL components are toggled off, set to default lease duration - const { rotationPeriod, verificationTtl } = this.args.model; - // value returned from API is a number, and string when from form action - if (Number(rotationPeriod) === 0) this.args.model.rotationPeriod = '24h'; - if (Number(verificationTtl) === 0) this.args.model.verificationTtl = '24h'; - yield this.args.model.save(); - this.flashMessages.success( - `Successfully ${isNew ? 'created' : 'updated'} the key - ${name}.` - ); - this.args.onSave(); - } - } catch (error) { - const message = error.errors ? error.errors.join('. 
') : error.message; - this.errorBanner = message; - this.invalidFormAlert = 'There was an error submitting this form.'; - } - } -} diff --git a/ui/app/components/oidc/provider-form.js b/ui/app/components/oidc/provider-form.js deleted file mode 100644 index c51754da8ce6a..0000000000000 --- a/ui/app/components/oidc/provider-form.js +++ /dev/null @@ -1,85 +0,0 @@ -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; -import { task } from 'ember-concurrency'; -import parseURL from 'core/utils/parse-url'; -/** - * @module OidcProviderForm - * OidcProviderForm components are used to create and update OIDC providers - * - * @example - * ```js - * - * ``` - * @callback onCancel - * @callback onSave - * @param {Object} model - oidc client model - * @param {onCancel} onCancel - callback triggered when cancel button is clicked - * @param {onSave} onSave - callback triggered on save success - */ - -export default class OidcProviderForm extends Component { - @service store; - @service flashMessages; - @tracked modelValidations; - @tracked errorBanner; - @tracked invalidFormAlert; - @tracked radioCardGroupValue = - // If "*" is provided, all clients are allowed: https://www.vaultproject.io/api-docs/secret/identity/oidc-provider#parameters - !this.args.model.allowedClientIds || this.args.model.allowedClientIds.includes('*') - ? 'allow_all' - : 'limited'; - - constructor() { - super(...arguments); - const { model } = this.args; - model.issuer = model.isNew ? 
'' : parseURL(model.issuer).origin; - } - - @action - handleClientSelection(selection) { - // if array then coming from search-select component, set selection as model clients - if (Array.isArray(selection)) { - this.args.model.allowedClientIds = selection.map((client) => client.clientId); - } else { - // otherwise update radio button value and reset clients so - // UI always reflects a user's selection (including when no clients are selected) - this.radioCardGroupValue = selection; - this.args.model.allowedClientIds = []; - } - } - - @action - cancel() { - const method = this.args.model.isNew ? 'unloadRecord' : 'rollbackAttributes'; - this.args.model[method](); - this.args.onCancel(); - } - - @task - *save(event) { - event.preventDefault(); - try { - const { isValid, state, invalidFormMessage } = this.args.model.validate(); - this.modelValidations = isValid ? null : state; - this.invalidFormAlert = invalidFormMessage; - if (isValid) { - const { isNew, name } = this.args.model; - if (this.radioCardGroupValue === 'allow_all') { - this.args.model.allowedClientIds = ['*']; - } - yield this.args.model.save(); - this.flashMessages.success( - `Successfully ${isNew ? 'created' : 'updated'} the OIDC provider - ${name}.` - ); - this.args.onSave(); - } - } catch (error) { - const message = error.errors ? error.errors.join('. 
') : error.message; - this.errorBanner = message; - this.invalidFormAlert = 'There was an error submitting this form.'; - } - } -} diff --git a/ui/app/components/oidc/scope-form.js b/ui/app/components/oidc/scope-form.js deleted file mode 100644 index 0e1c3a9396957..0000000000000 --- a/ui/app/components/oidc/scope-form.js +++ /dev/null @@ -1,62 +0,0 @@ -import Component from '@glimmer/component'; -import { tracked } from '@glimmer/tracking'; -import { action } from '@ember/object'; -import { task } from 'ember-concurrency'; -import { inject as service } from '@ember/service'; - -/** - * @module OidcScopeForm - * Oidc scope form components are used to create and edit oidc scopes - * - * @example - * ```js - * - * ``` - * @callback onCancel - * @callback onSave - * @param {Object} model - oidc scope model - * @param {onCancel} onCancel - callback triggered when cancel button is clicked - * @param {onSave} onSave - callback triggered on save success - */ - -export default class OidcScopeFormComponent extends Component { - @service flashMessages; - @tracked errorBanner; - @tracked invalidFormAlert; - @tracked modelValidations; - // formatting here is purposeful so that whitespace renders correctly in JsonEditor - exampleTemplate = `{ - "username": {{identity.entity.aliases.$MOUNT_ACCESSOR.name}}, - "contact": { - "email": {{identity.entity.metadata.email}}, - "phone_number": {{identity.entity.metadata.phone_number}} - }, - "groups": {{identity.entity.groups.names}} -}`; - - @task - *save(event) { - event.preventDefault(); - try { - const { isValid, state, invalidFormMessage } = this.args.model.validate(); - this.modelValidations = isValid ? null : state; - this.invalidFormAlert = invalidFormMessage; - if (isValid) { - const { isNew, name } = this.args.model; - yield this.args.model.save(); - this.flashMessages.success(`Successfully ${isNew ? 'created' : 'updated'} the scope ${name}.`); - this.args.onSave(); - } - } catch (error) { - const message = error.errors ? 
error.errors.join('. ') : error.message; - this.errorBanner = message; - this.invalidFormAlert = 'There was an error submitting this form.'; - } - } - @action - cancel() { - const method = this.args.model.isNew ? 'unloadRecord' : 'rollbackAttributes'; - this.args.model[method](); - this.args.onCancel(); - } -} diff --git a/ui/app/components/okta-number-challenge.js b/ui/app/components/okta-number-challenge.js deleted file mode 100644 index 7c9566e11546b..0000000000000 --- a/ui/app/components/okta-number-challenge.js +++ /dev/null @@ -1,24 +0,0 @@ -import Component from '@glimmer/component'; - -/** - * @module OktaNumberChallenge - * OktaNumberChallenge components are used to display loading screen and correct answer for Okta Number Challenge when signing in through Okta - * - * @example - * ```js - * - * ``` - * @param {number} correctAnswer - The correct answer to click for the okta number challenge. - * @param {boolean} hasError - Determines if there is an error being thrown. - * @param {function} onReturnToLogin - Sets waitingForOktaNumberChallenge to false if want to return to main login. 
- */ - -export default class OktaNumberChallenge extends Component { - get oktaNumberChallengeCorrectAnswer() { - return this.args.correctAnswer; - } - - get errorThrown() { - return this.args.hasError; - } -} diff --git a/ui/app/components/pki-cert-popup.js b/ui/app/components/pki-cert-popup.js new file mode 100644 index 0000000000000..6f4da8d1e3ecb --- /dev/null +++ b/ui/app/components/pki-cert-popup.js @@ -0,0 +1,17 @@ +import Component from '@ember/component'; + +export default Component.extend({ + /* + * @public + * @param DS.Model + * + * the pki-certificate model + */ + item: null, + + actions: { + delete(item) { + item.save({ adapterOptions: { method: 'revoke' } }); + }, + }, +}); diff --git a/ui/app/components/pki/pki-cert-show.js b/ui/app/components/pki-cert-show.js similarity index 79% rename from ui/app/components/pki/pki-cert-show.js rename to ui/app/components/pki-cert-show.js index 0c743eb151814..bd1dad467b351 100644 --- a/ui/app/components/pki/pki-cert-show.js +++ b/ui/app/components/pki-cert-show.js @@ -1,4 +1,4 @@ -import RoleEdit from '../role-edit'; +import RoleEdit from './role-edit'; export default RoleEdit.extend({ actions: { diff --git a/ui/app/components/pki/pki-cert-popup.js b/ui/app/components/pki/pki-cert-popup.js deleted file mode 100644 index 49237e4b6471d..0000000000000 --- a/ui/app/components/pki/pki-cert-popup.js +++ /dev/null @@ -1,24 +0,0 @@ -import Component from '@glimmer/component'; -import { action } from '@ember/object'; - -/** - * @module PkiCertPopup - * PkiCertPopup component is the hotdog menu button that allows you to see details or revoke a certificate. - * - * @example - * ```js - * - * ``` - * @param {class} item - the PKI cert in question. 
- */ - -export default class PkiCertPopup extends Component { - get item() { - return this.args.item || null; - } - - @action - delete(item) { - item.save({ adapterOptions: { method: 'revoke' } }); - } -} diff --git a/ui/app/components/regex-validator.hbs b/ui/app/components/regex-validator.hbs index 6576703c0a115..10b3a36af6149 100644 --- a/ui/app/components/regex-validator.hbs +++ b/ui/app/components/regex-validator.hbs @@ -16,9 +16,9 @@

{{@attr.options.subText}} {{#if @attr.options.docLink}} - + See our documentation - + for help. {{/if}}

diff --git a/ui/app/components/pki/role-pki-edit.js b/ui/app/components/role-pki-edit.js similarity index 76% rename from ui/app/components/pki/role-pki-edit.js rename to ui/app/components/role-pki-edit.js index 357b00cc2acfc..0134050950147 100644 --- a/ui/app/components/pki/role-pki-edit.js +++ b/ui/app/components/role-pki-edit.js @@ -1,4 +1,4 @@ -import RoleEdit from '../role-edit'; +import RoleEdit from './role-edit'; export default RoleEdit.extend({ init() { diff --git a/ui/lib/core/addon/components/secret-list-header-tab.js b/ui/app/components/secret-list-header-tab.js similarity index 92% rename from ui/lib/core/addon/components/secret-list-header-tab.js rename to ui/app/components/secret-list-header-tab.js index ef281b5d13463..a6cf273962b8b 100644 --- a/ui/lib/core/addon/components/secret-list-header-tab.js +++ b/ui/app/components/secret-list-header-tab.js @@ -14,8 +14,6 @@ * @param {string} [path] - set on options-for-backend this tells us the specifics of the URL the query should hit. * @param {string} label - The name displayed on the tab. Set on the options-for-backend. * @param {string} [tab] - The name of the tab. Set on the options-for-backend. - * @param {boolean} [isEngine=false] - If used within an Ember engine, will need to modify how the links to routes are defined. - * @param {string} [link] - If within an engine provide the name of the link that is defined in the routes file fo the engine, example : 'overview'. 
* */ import Component from '@glimmer/component'; @@ -25,7 +23,6 @@ import { inject as service } from '@ember/service'; export default class SecretListHeaderTab extends Component { @service store; @tracked dontShowTab; - constructor() { super(...arguments); this.fetchCapabilities(); diff --git a/ui/app/components/secret-list-header.js b/ui/app/components/secret-list-header.js new file mode 100644 index 0000000000000..4bb884d46ee12 --- /dev/null +++ b/ui/app/components/secret-list-header.js @@ -0,0 +1,15 @@ +import Component from '@glimmer/component'; + +export default class SecretListHeader extends Component { + // api + isCertTab = false; + isConfigure = false; + baseKey = null; + backendCrumb = null; + model = null; + options = null; + + get isKV() { + return ['kv', 'generic'].includes(this.args.model.engineType); + } +} diff --git a/ui/app/components/secret-version-menu.js b/ui/app/components/secret-version-menu.js index d718e5a30c33b..dc8d0505c4a25 100644 --- a/ui/app/components/secret-version-menu.js +++ b/ui/app/components/secret-version-menu.js @@ -1,11 +1,8 @@ import Component from '@glimmer/component'; import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; import { next } from '@ember/runloop'; export default class SecretVersionMenu extends Component { - @service router; - onRefresh() {} @action diff --git a/ui/app/components/selectable-card.js b/ui/app/components/selectable-card.js index abe19949c5f25..d1f6edc8fcda3 100644 --- a/ui/app/components/selectable-card.js +++ b/ui/app/components/selectable-card.js @@ -1,5 +1,5 @@ -import Component from '@glimmer/component'; - +import Component from '@ember/component'; +import { computed } from '@ember/object'; /** * @module SelectableCard * SelectableCard components are card-like components that display a title, total, subtotal, and anything after the yield. 
@@ -9,20 +9,30 @@ import Component from '@glimmer/component'; * ```js * * ``` - * @param {string} [cardTitle] - cardTitle displays the card title. - * @param {number} [total = 0] - the number displayed as the largest text in the component. - * @param {string} [subText] - subText describes the total. - * @param {string} [actionText] - action text link. - * @param {string} [actionTo] - route where link will take you. - * @param {string} [queryParam] - tab for the route the link will take you. - * @param {string} [type] - type used in the link type. + * @param cardTitle=null {String} - cardTitle displays the card title + * @param total=0 {Number} - the Total number displays like a title, it's the largest text in the component + * @param subText=null {String} - subText describes the total + * @param actionCard=false {Boolean} - false default selectable card container used in metrics, true a card that focus on actions as seen in database secret engine overview + * @param actionText=null {String} - that action that happens in an actionCard */ -export default class SelectableCard extends Component { - get gridContainer() { - return this.args.gridContainer || false; - } - get total() { - return this.args.total || 0; - } -} +export default Component.extend({ + cardTitle: '', + total: 0, + subText: '', + actionCard: false, + actionText: '', + gridContainer: false, + tagName: '', // do not wrap component with div + formattedCardTitle: computed('total', function () { + const { cardTitle, total } = this; + + if (cardTitle === 'Tokens') { + return total !== 1 ? 'Tokens' : 'Token'; + } else if (cardTitle === 'Entities') { + return total !== 1 ? 
'Entities' : 'Entity'; + } + + return cardTitle; + }), +}); diff --git a/ui/app/components/splash-page.js b/ui/app/components/splash-page.js index 8a1d4f5461cd9..5942d3ab8187b 100644 --- a/ui/app/components/splash-page.js +++ b/ui/app/components/splash-page.js @@ -1,33 +1,15 @@ -/** - * @module SplashPage - * SplashPage component is used as a landing page with a box horizontally and center aligned on the page. It's used as the login landing page. - * - * - * @example - * ```js - * - * content here - * - * ``` - * @param {string} [ariaLabel] - aria label for the status icon. - * @param {string} [label] - label for the status menu. - * @param {string} [type] - determines where the component is being used. e.g. replication, auth, etc. - * @param {function} [onLinkClick] - function to handle click on the nested links under content. - * - */ +export default Component.extend({ + currentCluster: service('current-cluster'), + cluster: alias('currentCluster.cluster'), + auth: service(), + media: service(), + type: 'cluster', + itemTag: null, + glyphName: computed('type', function () { + return { + cluster: 'circle-dot', + user: 'user', + }[this.type]; + }), -export default class StatusMenu extends Component { - @service currentCluster; - @service auth; - @service media; - @service router; - - get type() { - return this.args.type || 'cluster'; - } - - get glyphName() { - return this.type === 'user' ? 
'user' : 'circle-dot'; - } - - @action - onLinkClick(dropdown) { - if (dropdown) { - // strange issue where closing dropdown triggers full transition which redirects to auth screen in production builds - // closing dropdown in next tick of run loop fixes it - next(() => dropdown.actions.close()); - } - this.args.onLinkClick(); - } -} + actions: { + onLinkClick(dropdown) { + if (dropdown) { + // strange issue where closing dropdown triggers full transition which redirects to auth screen in production builds + // closing dropdown in next tick of run loop fixes it + next(() => dropdown.actions.close()); + } + this.onLinkClick(); + }, + }, +}); diff --git a/ui/app/components/wizard-section.js b/ui/app/components/wizard-section.js new file mode 100644 index 0000000000000..f8c97b4b804d1 --- /dev/null +++ b/ui/app/components/wizard-section.js @@ -0,0 +1,9 @@ +import outerHTMLComponent from './outer-html'; + +export default outerHTMLComponent.extend({ + headerText: null, + headerIcon: null, + docText: null, + docPath: null, + instructions: null, +}); diff --git a/ui/app/controllers/vault/cluster/access/oidc.js b/ui/app/controllers/vault/cluster/access/oidc.js deleted file mode 100644 index c6ef6dd706f37..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc.js +++ /dev/null @@ -1,34 +0,0 @@ -import Controller from '@ember/controller'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; - -export default class OidcConfigureController extends Controller { - @service router; - - @tracked header = null; - - constructor() { - super(...arguments); - this.router.on('routeDidChange', (transition) => this.setHeader(transition)); - } - - setHeader(transition) { - // set correct header state based on child route - // when no clients have been created, display create button as call to action - // list views share the same header with tabs as resource links - // the remaining routes are responsible for their own header - const 
routeName = transition.to.name; - if (routeName.includes('oidc.index')) { - this.header = 'cta'; - } else { - const isList = ['clients', 'assignments', 'keys', 'scopes', 'providers'].find((resource) => { - return routeName.includes(`${resource}.index`); - }); - this.header = isList ? 'list' : null; - } - } - - get isCta() { - return this.header === 'cta'; - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/assignments/assignment/details.js b/ui/app/controllers/vault/cluster/access/oidc/assignments/assignment/details.js deleted file mode 100644 index bf21dc72f8b55..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/assignments/assignment/details.js +++ /dev/null @@ -1,21 +0,0 @@ -import Controller from '@ember/controller'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; - -export default class OidcAssignmentDetailsController extends Controller { - @service router; - @service flashMessages; - - @action - async delete() { - try { - await this.model.destroyRecord(); - this.flashMessages.success('Assignment deleted successfully'); - this.router.transitionTo('vault.cluster.access.oidc.assignments'); - } catch (error) { - this.model.rollbackAttributes(); - const message = error.errors ? error.errors.join('. 
') : error.message; - this.flashMessages.danger(message); - } - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/clients/client.js b/ui/app/controllers/vault/cluster/access/oidc/clients/client.js deleted file mode 100644 index 4e3e0c148990f..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/clients/client.js +++ /dev/null @@ -1,21 +0,0 @@ -import Controller from '@ember/controller'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; - -export default class OidcClientController extends Controller { - @service router; - @tracked isEditRoute; - - constructor() { - super(...arguments); - this.router.on( - 'routeDidChange', - ({ targetName }) => (this.isEditRoute = targetName.includes('edit') ? true : false) - ); - } - - get showHeader() { - // hide header when rendering the edit form - return !this.isEditRoute; - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/clients/client/details.js b/ui/app/controllers/vault/cluster/access/oidc/clients/client/details.js deleted file mode 100644 index 55a8b296ee49a..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/clients/client/details.js +++ /dev/null @@ -1,21 +0,0 @@ -import Controller from '@ember/controller'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; - -export default class OidcClientDetailsController extends Controller { - @service router; - @service flashMessages; - - @action - async delete() { - try { - await this.model.destroyRecord(); - this.flashMessages.success('Application deleted successfully'); - this.router.transitionTo('vault.cluster.access.oidc.clients'); - } catch (error) { - this.model.rollbackAttributes(); - const message = error.errors ? error.errors.join('. 
') : error.message; - this.flashMessages.danger(message); - } - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/keys/key.js b/ui/app/controllers/vault/cluster/access/oidc/keys/key.js deleted file mode 100644 index 1658fda3a6d3d..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/keys/key.js +++ /dev/null @@ -1,20 +0,0 @@ -import Controller from '@ember/controller'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; - -export default class OidcKeyController extends Controller { - @service router; - @tracked isEditRoute; - - constructor() { - super(...arguments); - this.router.on('routeDidChange', ({ targetName }) => { - return (this.isEditRoute = targetName.includes('edit') ? true : false); - }); - } - - get showHeader() { - // hide header when rendering the edit form - return !this.isEditRoute; - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/keys/key/details.js b/ui/app/controllers/vault/cluster/access/oidc/keys/key/details.js deleted file mode 100644 index de844e1d7ff5d..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/keys/key/details.js +++ /dev/null @@ -1,36 +0,0 @@ -import Controller from '@ember/controller'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; -import { task } from 'ember-concurrency'; -import { waitFor } from '@ember/test-waiters'; - -export default class OidcKeyDetailsController extends Controller { - @service router; - @service flashMessages; - - @task - @waitFor - *rotateKey() { - const adapter = this.store.adapterFor('oidc/key'); - yield adapter - .rotate(this.model.name, this.model.verificationTtl) - .then(() => { - this.flashMessages.success(`Success: ${this.model.name} connection was rotated.`); - }) - .catch((e) => { - this.flashMessages.danger(e.errors); - }); - } - @action - async delete() { - try { - await this.model.destroyRecord(); - this.flashMessages.success('Key deleted 
successfully'); - this.router.transitionTo('vault.cluster.access.oidc.keys'); - } catch (error) { - this.model.rollbackAttributes(); - const message = error.errors ? error.errors.join('. ') : error.message; - this.flashMessages.danger(message); - } - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/providers/provider.js b/ui/app/controllers/vault/cluster/access/oidc/providers/provider.js deleted file mode 100644 index a0b1b295b9083..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/providers/provider.js +++ /dev/null @@ -1,20 +0,0 @@ -import Controller from '@ember/controller'; -import { inject as service } from '@ember/service'; -import { tracked } from '@glimmer/tracking'; - -export default class OidcProviderController extends Controller { - @service router; - @tracked isEditRoute; - - constructor() { - super(...arguments); - this.router.on('routeDidChange', ({ targetName }) => { - return (this.isEditRoute = targetName.includes('edit') ? true : false); - }); - } - - get showHeader() { - // hide header when rendering the edit form - return !this.isEditRoute; - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/providers/provider/details.js b/ui/app/controllers/vault/cluster/access/oidc/providers/provider/details.js deleted file mode 100644 index 6a15ef2e3fb71..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/providers/provider/details.js +++ /dev/null @@ -1,21 +0,0 @@ -import Controller from '@ember/controller'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; - -export default class OidcProviderDetailsController extends Controller { - @service router; - @service flashMessages; - - @action - async delete() { - try { - await this.model.destroyRecord(); - this.flashMessages.success('Provider deleted successfully'); - this.router.transitionTo('vault.cluster.access.oidc.providers'); - } catch (error) { - this.model.rollbackAttributes(); - const message = error.errors 
? error.errors.join('. ') : error.message; - this.flashMessages.danger(message); - } - } -} diff --git a/ui/app/controllers/vault/cluster/access/oidc/scopes/scope/details.js b/ui/app/controllers/vault/cluster/access/oidc/scopes/scope/details.js deleted file mode 100644 index 1934ae082e5c3..0000000000000 --- a/ui/app/controllers/vault/cluster/access/oidc/scopes/scope/details.js +++ /dev/null @@ -1,21 +0,0 @@ -import Controller from '@ember/controller'; -import { action } from '@ember/object'; -import { inject as service } from '@ember/service'; - -export default class OidcScopeDetailsController extends Controller { - @service router; - @service flashMessages; - - @action - async delete() { - try { - await this.model.destroyRecord(); - this.flashMessages.success('Scope deleted successfully'); - this.router.transitionTo('vault.cluster.access.oidc.scopes'); - } catch (error) { - this.model.rollbackAttributes(); - const message = error.errors ? error.errors.join('. ') : error.message; - this.flashMessages.danger(message); - } - } -} diff --git a/ui/app/controllers/vault/cluster/auth.js b/ui/app/controllers/vault/cluster/auth.js index 496ceaf1c8bcf..41eb3dcb6ec39 100644 --- a/ui/app/controllers/vault/cluster/auth.js +++ b/ui/app/controllers/vault/cluster/auth.js @@ -85,9 +85,5 @@ export default Controller.extend({ mfaErrors: null, }); }, - cancelAuthentication() { - this.set('cancelAuth', true); - this.set('waitingForOktaNumberChallenge', false); - }, }, }); diff --git a/ui/app/decorators/model-validations.js b/ui/app/decorators/model-validations.js index 1db84446d1e42..8520811daeb10 100644 --- a/ui/app/decorators/model-validations.js +++ b/ui/app/decorators/model-validations.js @@ -22,7 +22,7 @@ import { get } from '@ember/object'; * state represents the error state of the properties defined in the validations object * const { isValid, errors } = state[propertyKeyName]; * isValid represents the validity of the property - * errors will be populated with messages defined 
in the validations object when validations fail. message must be a complete sentence (and include punctuation) + * errors will be populated with messages defined in the validations object when validations fail * since a property can have multiple validations, errors is always returned as an array * *** basic example @@ -30,8 +30,7 @@ import { get } from '@ember/object'; * import Model from '@ember-data/model'; * import withModelValidations from 'vault/decorators/model-validations'; * - * Notes: all messages need to have a period at the end of them. - * const validations = { foo: [{ type: 'presence', message: 'foo is a required field.' }] }; + * const validations = { foo: [{ type: 'presence', message: 'foo is a required field' }] }; * @withModelValidations(validations) * class SomeModel extends Model { foo = null; } * @@ -43,7 +42,7 @@ import { get } from '@ember/object'; * *** example using custom validator * - * const validations = { foo: [{ validator: (model) => model.bar.includes('test') ? model.foo : false, message: 'foo is required if bar includes test.' }] }; + * const validations = { foo: [{ validator: (model) => model.bar.includes('test') ? model.foo : false, message: 'foo is required if bar includes test' }] }; * @withModelValidations(validations) * class SomeModel extends Model { foo = false; bar = ['foo', 'baz']; } * @@ -51,11 +50,7 @@ import { get } from '@ember/object'; * const { isValid, state } = model.validate(); * -> isValid = false; * -> state.foo.isValid = false; - * -> state.foo.errors = ['foo is required if bar includes test.']; - * - * *** example adding class in hbs file - * all form-validations need to have a red border around them. 
Add this by adding a conditional class 'has-error-border' - * class="input field {{if this.errors.name.errors 'has-error-border'}}" + * -> state.foo.errors = ['foo is required if bar includes test']; */ export function withModelValidations(validations) { diff --git a/ui/app/helpers/-date-base.js b/ui/app/helpers/-date-base.js index 4ca8dad2a5af9..384cf790907c0 100644 --- a/ui/app/helpers/-date-base.js +++ b/ui/app/helpers/-date-base.js @@ -1,15 +1,10 @@ import { run } from '@ember/runloop'; import Helper from '@ember/component/helper'; -import Ember from 'ember'; export default Helper.extend({ disableInterval: false, compute(value, { interval }) { - if (Ember.testing) { - // issues with flaky test, suspect it has to the do with the run loop not being cleared as intended farther down. - return; - } if (this.disableInterval) { return; } diff --git a/ui/lib/core/addon/helpers/options-for-backend.js b/ui/app/helpers/options-for-backend.js similarity index 85% rename from ui/lib/core/addon/helpers/options-for-backend.js rename to ui/app/helpers/options-for-backend.js index 64728d8f599ed..88e457b7932c1 100644 --- a/ui/lib/core/addon/helpers/options-for-backend.js +++ b/ui/app/helpers/options-for-backend.js @@ -10,38 +10,6 @@ const DEFAULT_DISPLAY = { editComponent: 'secret-edit', listItemPartial: 'secret-list/item', }; -const ENGINE_SECRET_BACKENDS = { - pki: { - displayName: 'PKI', - navigateTree: false, - tabs: [ - { - label: 'Overview', - link: 'overview', - }, - { - label: 'Roles', - link: 'roles', - }, - { - label: 'Issuers', - link: 'issuers', - }, - { - label: 'Certificates', - link: 'certificates', - }, - { - label: 'Keys', - link: 'keys', - }, - { - label: 'Configuration', - link: 'configuration', - }, - ], - }, -}; const SECRET_BACKENDS = { aws: { displayName: 'AWS', @@ -63,18 +31,18 @@ const SECRET_BACKENDS = { searchPlaceholder: 'Filter roles', item: 'role', create: 'Create role', - editComponent: 'pki/role-pki-edit', + editComponent: 'role-pki-edit', }, { - 
name: 'cert', + name: 'certs', modelPrefix: 'cert/', label: 'Certificates', searchPlaceholder: 'Filter certificates', item: 'certificates', create: 'Create role', - tab: 'cert', + tab: 'certs', listItemPartial: 'secret-list/pki-cert-item', - editComponent: 'pki/pki-cert-show', + editComponent: 'pki-cert-show', }, ], }, @@ -198,8 +166,8 @@ const SECRET_BACKENDS = { }, }; -export function optionsForBackend([backend, tab, isEngine]) { - const selected = isEngine ? ENGINE_SECRET_BACKENDS[backend] : SECRET_BACKENDS[backend]; +export function optionsForBackend([backend, tab]) { + const selected = SECRET_BACKENDS[backend]; let backendOptions; if (selected && selected.tabs) { diff --git a/ui/app/helpers/tabs-for-auth-section.js b/ui/app/helpers/tabs-for-auth-section.js index 9911c80152f8f..8bd32885f5b28 100644 --- a/ui/app/helpers/tabs-for-auth-section.js +++ b/ui/app/helpers/tabs-for-auth-section.js @@ -10,11 +10,11 @@ const TABS_FOR_SETTINGS = { }, { label: 'Identity Allow List Tidy', - routeParams: ['vault.cluster.settings.auth.configure.section', 'identity-accesslist'], + routeParams: ['vault.cluster.settings.auth.configure.section', 'identity-whitelist'], // TODO: Update endpoint from PR#10997 }, { label: 'Role Tag Deny List Tidy', - routeParams: ['vault.cluster.settings.auth.configure.section', 'roletag-denylist'], + routeParams: ['vault.cluster.settings.auth.configure.section', 'roletag-blacklist'], // TODO: Update endpoints from PR#10997 }, ], azure: [ diff --git a/ui/app/lib/route-paths.js b/ui/app/lib/route-paths.js deleted file mode 100644 index 221a6401ce64f..0000000000000 --- a/ui/app/lib/route-paths.js +++ /dev/null @@ -1,12 +0,0 @@ -export const INIT = 'vault.cluster.init'; -export const UNSEAL = 'vault.cluster.unseal'; -export const AUTH = 'vault.cluster.auth'; -export const REDIRECT = 'vault.cluster.redirect'; -export const CLUSTER = 'vault.cluster'; -export const CLUSTER_INDEX = 'vault.cluster.index'; -export const OIDC_CALLBACK = 
'vault.cluster.oidc-callback'; -export const OIDC_PROVIDER = 'vault.cluster.oidc-provider'; -export const NS_OIDC_PROVIDER = 'vault.cluster.oidc-provider-ns'; -export const DR_REPLICATION_SECONDARY = 'vault.cluster.replication-dr-promote'; -export const DR_REPLICATION_SECONDARY_DETAILS = 'vault.cluster.replication-dr-promote.details'; -export const EXCLUDED_REDIRECT_URLS = ['/vault/logout']; diff --git a/ui/app/mixins/cluster-route.js b/ui/app/mixins/cluster-route.js index d3f83318d91a7..87ad33b2b4bed 100644 --- a/ui/app/mixins/cluster-route.js +++ b/ui/app/mixins/cluster-route.js @@ -1,20 +1,19 @@ import { inject as service } from '@ember/service'; import Mixin from '@ember/object/mixin'; import RSVP from 'rsvp'; -import { - INIT, - UNSEAL, - AUTH, - CLUSTER, - CLUSTER_INDEX, - OIDC_CALLBACK, - OIDC_PROVIDER, - NS_OIDC_PROVIDER, - DR_REPLICATION_SECONDARY, - DR_REPLICATION_SECONDARY_DETAILS, - EXCLUDED_REDIRECT_URLS, - REDIRECT, -} from 'vault/lib/route-paths'; +const INIT = 'vault.cluster.init'; +const UNSEAL = 'vault.cluster.unseal'; +const AUTH = 'vault.cluster.auth'; +const CLUSTER = 'vault.cluster'; +const CLUSTER_INDEX = 'vault.cluster.index'; +const OIDC_CALLBACK = 'vault.cluster.oidc-callback'; +const OIDC_PROVIDER = 'vault.cluster.oidc-provider'; +const NS_OIDC_PROVIDER = 'vault.cluster.oidc-provider-ns'; +const DR_REPLICATION_SECONDARY = 'vault.cluster.replication-dr-promote'; +const DR_REPLICATION_SECONDARY_DETAILS = 'vault.cluster.replication-dr-promote.details'; +const EXCLUDED_REDIRECT_URLS = ['/vault/logout']; + +export { INIT, UNSEAL, AUTH, CLUSTER, CLUSTER_INDEX, DR_REPLICATION_SECONDARY }; export default Mixin.create({ auth: service(), @@ -97,14 +96,11 @@ export default Mixin.create({ if ( (!cluster.needsInit && this.routeName === INIT) || (!cluster.sealed && this.routeName === UNSEAL) || - (!cluster?.dr?.isSecondary && this.routeName === DR_REPLICATION_SECONDARY) + (!cluster?.dr?.isSecondary && this.routeName === DR_REPLICATION_SECONDARY) || + 
(isAuthed && this.routeName === AUTH) ) { return CLUSTER; } - if (isAuthed && this.routeName === AUTH) { - // if you're already authed and you wanna go to auth, you probably want to redirect - return REDIRECT; - } return null; }, }); diff --git a/ui/app/models/auth-config/aws/identity-accesslist.js b/ui/app/models/auth-config/aws/identity-whitelist.js similarity index 100% rename from ui/app/models/auth-config/aws/identity-accesslist.js rename to ui/app/models/auth-config/aws/identity-whitelist.js diff --git a/ui/app/models/auth-config/aws/roletag-denylist.js b/ui/app/models/auth-config/aws/roletag-blacklist.js similarity index 100% rename from ui/app/models/auth-config/aws/roletag-denylist.js rename to ui/app/models/auth-config/aws/roletag-blacklist.js diff --git a/ui/app/models/cluster.js b/ui/app/models/cluster.js index 1f24ca9dc8279..07e259a284248 100644 --- a/ui/app/models/cluster.js +++ b/ui/app/models/cluster.js @@ -43,7 +43,6 @@ export default Model.extend({ sealProgress: alias('leaderNode.progress'), sealType: alias('leaderNode.type'), storageType: alias('leaderNode.storageType'), - hcpLinkStatus: alias('leaderNode.hcpLinkStatus'), hasProgress: gte('sealProgress', 1), usingRaft: equal('storageType', 'raft'), diff --git a/ui/app/models/database/connection.js b/ui/app/models/database/connection.js index 143882dca0cc1..d1fc762909681 100644 --- a/ui/app/models/database/connection.js +++ b/ui/app/models/database/connection.js @@ -54,7 +54,7 @@ export default Model.extend({ defaultSubText: 'Unless a custom policy is specified, Vault will use a default: 20 characters with at least 1 uppercase, 1 lowercase, 1 number, and 1 dash character.', defaultShown: 'Default', - docLink: '/docs/concepts/password-policies', + docLink: 'https://www.vaultproject.io/docs/concepts/password-policies', }), // common fields @@ -106,7 +106,7 @@ export default Model.extend({ subText: 'Enter the custom username template to use.', defaultSubText: 'Template describing how dynamic 
usernames are generated. Vault will use the default for this plugin.', - docLink: '/docs/concepts/username-templating', + docLink: 'https://www.vaultproject.io/docs/concepts/username-templating', defaultShown: 'Default', }), max_open_connections: attr('number', { diff --git a/ui/app/models/mount-config.js b/ui/app/models/mount-config.js index 4a9be19835490..f2f12b563ce7a 100644 --- a/ui/app/models/mount-config.js +++ b/ui/app/models/mount-config.js @@ -28,7 +28,7 @@ export default Fragment.extend({ }), passthroughRequestHeaders: attr({ label: 'Allowed passthrough request headers', - helpText: 'Headers to allow and pass from the request to the backend', + helpText: 'Headers to whitelist and pass from the request to the backend', editType: 'stringArray', }), tokenType: attr('string', { diff --git a/ui/app/models/node.js b/ui/app/models/node.js index efc9bd60a93c6..2acd3a05e2cb8 100644 --- a/ui/app/models/node.js +++ b/ui/app/models/node.js @@ -24,7 +24,6 @@ export default Model.extend({ version: attr('string'), type: attr('string'), storageType: attr('string'), - hcpLinkStatus: attr('string'), //https://www.vaultproject.io/docs/http/sys-leader.html haEnabled: attr('boolean'), diff --git a/ui/app/models/oidc/assignment.js b/ui/app/models/oidc/assignment.js deleted file mode 100644 index 45cbe519cd96d..0000000000000 --- a/ui/app/models/oidc/assignment.js +++ /dev/null @@ -1,41 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { withModelValidations } from 'vault/decorators/model-validations'; -import { isPresent } from '@ember/utils'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' 
}, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], - targets: [ - { - validator(model) { - return isPresent(model.entityIds) || isPresent(model.groupIds); - }, - message: 'At least one entity or group is required.', - }, - ], -}; - -@withModelValidations(validations) -export default class OidcAssignmentModel extends Model { - @attr('string') name; - @attr('array') entityIds; - @attr('array') groupIds; - - // CAPABILITIES - @lazyCapabilities(apiPath`identity/oidc/assignment/${'name'}`, 'name') assignmentPath; - get canRead() { - return this.assignmentPath.get('canRead'); - } - get canEdit() { - return this.assignmentPath.get('canUpdate'); - } - get canDelete() { - return this.assignmentPath.get('canDelete'); - } -} diff --git a/ui/app/models/oidc/client.js b/ui/app/models/oidc/client.js deleted file mode 100644 index 7622dccde7673..0000000000000 --- a/ui/app/models/oidc/client.js +++ /dev/null @@ -1,107 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import fieldToAttrs from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' }, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], - key: [{ type: 'presence', message: 'Key is required.' }], -}; - -@withModelValidations(validations) -export default class OidcClientModel extends Model { - @attr('string', { label: 'Application name', editDisabled: true }) name; - @attr('string', { - label: 'Type', - subText: - 'Specify whether the application type is confidential or public. The public type must use PKCE. 
This cannot be edited later.', - editType: 'radio', - editDisabled: true, - defaultValue: 'confidential', - possibleValues: ['confidential', 'public'], - }) - clientType; - - @attr('array', { - label: 'Redirect URIs', - subText: - 'One of these values must exactly match the redirect_uri parameter value used in each authentication request.', - editType: 'stringArray', - }) - redirectUris; - - // >> MORE OPTIONS TOGGLE << - - @attr('string', { - label: 'Signing key', - subText: 'Add a key to sign and verify the JSON web tokens (JWT). This cannot be edited later.', - editType: 'searchSelect', - editDisabled: true, - onlyAllowExisting: true, - defaultValue() { - return ['default']; - }, - fallbackComponent: 'input-search', - selectLimit: 1, - models: ['oidc/key'], - }) - key; - @attr({ - label: 'Access Token TTL', - editType: 'ttl', - defaultValue: '24h', - }) - accessTokenTtl; - - @attr({ - label: 'ID Token TTL', - editType: 'ttl', - defaultValue: '24h', - }) - idTokenTtl; - - // >> END MORE OPTIONS TOGGLE << - - @attr('array', { label: 'Assign access' }) assignments; // no editType because does not use form-field component - @attr('string', { label: 'Client ID' }) clientId; - @attr('string') clientSecret; - - // TODO refactor when field-to-attrs util is refactored as decorator - _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, ['name', 'clientType', 'redirectUris']); - } - return this._attributeMeta; - } - - _fieldToAttrsGroups = null; - // more options fields - get fieldGroups() { - if (!this._fieldToAttrsGroups) { - this._fieldToAttrsGroups = fieldToAttrs(this, [ - { 'More options': ['key', 'idTokenTtl', 'accessTokenTtl'] }, - ]); - } - return this._fieldToAttrsGroups; - } - - // CAPABILITIES // - @lazyCapabilities(apiPath`identity/oidc/client/${'name'}`, 'name') clientPath; - get canRead() { - return 
this.clientPath.get('canRead'); - } - get canEdit() { - return this.clientPath.get('canUpdate'); - } - get canDelete() { - return this.clientPath.get('canDelete'); - } -} diff --git a/ui/app/models/oidc/key.js b/ui/app/models/oidc/key.js deleted file mode 100644 index 6c90be4860585..0000000000000 --- a/ui/app/models/oidc/key.js +++ /dev/null @@ -1,57 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' }, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], -}; - -@withModelValidations(validations) -export default class OidcKeyModel extends Model { - @attr('string', { editDisabled: true }) name; - @attr('string', { - defaultValue: 'RS256', - possibleValues: ['RS256', 'RS384', 'RS512', 'ES256', 'ES384', 'ES512', 'EdDSA'], - }) - algorithm; - - @attr({ editType: 'ttl', defaultValue: '24h' }) rotationPeriod; - @attr({ label: 'Verification TTL', editType: 'ttl', defaultValue: '24h' }) verificationTtl; - @attr('array', { label: 'Allowed applications' }) allowedClientIds; // no editType because does not use form-field component - - // TODO refactor when field-to-attrs is refactored as decorator - _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, [ - 'name', - 'algorithm', - 'rotationPeriod', - 'verificationTtl', - ]); - } - return this._attributeMeta; - } - - @lazyCapabilities(apiPath`identity/oidc/key/${'name'}`, 'name') keyPath; - @lazyCapabilities(apiPath`identity/oidc/key/${'name'}/rotate`, 'name') rotatePath; - get canRead() { - return this.keyPath.get('canRead'); - } - get canEdit() { 
- return this.keyPath.get('canUpdate'); - } - get canRotate() { - return this.rotatePath.get('canUpdate'); - } - get canDelete() { - return this.keyPath.get('canDelete'); - } -} diff --git a/ui/app/models/oidc/provider.js b/ui/app/models/oidc/provider.js deleted file mode 100644 index 7ffa8fc6fa2eb..0000000000000 --- a/ui/app/models/oidc/provider.js +++ /dev/null @@ -1,59 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' }, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], -}; - -@withModelValidations(validations) -export default class OidcProviderModel extends Model { - @attr('string', { editDisabled: true }) name; - @attr('string', { - subText: - 'The scheme, host, and optional port for your issuer. This will be used to build the URL that validates ID tokens.', - placeholderText: 'e.g. https://example.com:8200', - docLink: '/api-docs/secret/identity/oidc-provider#create-or-update-a-provider', - helpText: `Optional. This defaults to a URL with Vault's api_addr`, - }) - issuer; - - @attr('array', { - label: 'Supported scopes', - subText: 'Scopes define information about a user and the OIDC service. 
Optional.', - editType: 'searchSelect', - models: ['oidc/scope'], - fallbackComponent: 'string-list', - onlyAllowExisting: true, - }) - scopesSupported; - - @attr('array', { label: 'Allowed applications' }) allowedClientIds; // no editType because does not use form-field component - - // TODO refactor when field-to-attrs is refactored as decorator - _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, ['name', 'issuer', 'scopesSupported']); - } - return this._attributeMeta; - } - - @lazyCapabilities(apiPath`identity/oidc/provider/${'name'}`, 'name') providerPath; - get canRead() { - return this.providerPath.get('canRead'); - } - get canEdit() { - return this.providerPath.get('canUpdate'); - } - get canDelete() { - return this.providerPath.get('canDelete'); - } -} diff --git a/ui/app/models/oidc/scope.js b/ui/app/models/oidc/scope.js deleted file mode 100644 index d15ed52eeb5c9..0000000000000 --- a/ui/app/models/oidc/scope.js +++ /dev/null @@ -1,35 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -const validations = { - name: [{ type: 'presence', message: 'Name is required.' 
}], -}; - -@withModelValidations(validations) -export default class OidcScopeModel extends Model { - @attr('string', { editDisabled: true }) name; - @attr('string', { editType: 'textarea' }) description; - @attr('string', { label: 'JSON Template', editType: 'json', mode: 'ruby' }) template; - - // TODO refactor when field-to-attrs is refactored as decorator - _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, ['name', 'description', 'template']); - } - return this._attributeMeta; - } - - @lazyCapabilities(apiPath`identity/oidc/scope/${'name'}`, 'name') scopePath; - get canRead() { - return this.scopePath.get('canRead'); - } - get canEdit() { - return this.scopePath.get('canUpdate'); - } - get canDelete() { - return this.scopePath.get('canDelete'); - } -} diff --git a/ui/app/models/pki-ca-certificate.js b/ui/app/models/pki-ca-certificate.js index ec444c138361f..887353c083ff3 100644 --- a/ui/app/models/pki-ca-certificate.js +++ b/ui/app/models/pki-ca-certificate.js @@ -1,6 +1,6 @@ import { attr } from '@ember-data/model'; import { computed } from '@ember/object'; -import Certificate from './pki/cert'; +import Certificate from './pki-certificate'; export default Certificate.extend({ DISPLAY_FIELDS: computed(function () { diff --git a/ui/app/models/pki-certificate-sign.js b/ui/app/models/pki-certificate-sign.js index afc937f89cb72..556fe6e94b1d2 100644 --- a/ui/app/models/pki-certificate-sign.js +++ b/ui/app/models/pki-certificate-sign.js @@ -1,7 +1,7 @@ import { attr } from '@ember-data/model'; import { copy } from 'ember-copy'; import { computed } from '@ember/object'; -import Certificate from './pki/cert'; +import Certificate from './pki-certificate'; import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; export default Certificate.extend({ diff --git a/ui/app/models/pki/cert.js b/ui/app/models/pki-certificate.js 
similarity index 100% rename from ui/app/models/pki/cert.js rename to ui/app/models/pki-certificate.js diff --git a/ui/app/models/pki/pki-config.js b/ui/app/models/pki-config.js similarity index 90% rename from ui/app/models/pki/pki-config.js rename to ui/app/models/pki-config.js index 91770fca63cd5..9d8a88cefb7ed 100644 --- a/ui/app/models/pki/pki-config.js +++ b/ui/app/models/pki-config.js @@ -47,10 +47,12 @@ export default Model.extend({ }), crlAttrs: computed(function () { - let keys = ['expiry', 'disable']; + let keys = ['expiry']; return this.attrList(keys); }), //crl - expiry: attr('string', { defaultValue: '72h' }), - disable: attr('boolean', { defaultValue: false }), + expiry: attr({ + defaultValue: '72h', + editType: 'ttl', + }), }); diff --git a/ui/app/models/pki/pki-certificate-engine.js b/ui/app/models/pki/pki-certificate-engine.js deleted file mode 100644 index 8185a511e6628..0000000000000 --- a/ui/app/models/pki/pki-certificate-engine.js +++ /dev/null @@ -1,10 +0,0 @@ -import Model, { attr } from '@ember-data/model'; - -export default class PkiCertificateEngineModel extends Model { - @attr('string', { readOnly: true }) backend; - @attr('string') commonName; - @attr('string') issueDate; - @attr('string') serialNumber; - @attr('string') notAfter; - @attr('string') notBeforeDuration; -} diff --git a/ui/app/models/pki/pki-issuer-engine.js b/ui/app/models/pki/pki-issuer-engine.js deleted file mode 100644 index 00767f4216fb1..0000000000000 --- a/ui/app/models/pki/pki-issuer-engine.js +++ /dev/null @@ -1,51 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' 
}, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], -}; - -@withModelValidations(validations) -export default class PkiIssuerEngineModel extends Model { - @attr('string', { readOnly: true }) backend; - @attr('string', { - label: 'Issuer name', - fieldValue: 'id', - }) - name; - - get useOpenAPI() { - return true; - } - getHelpUrl(backend) { - return `/v1/${backend}/issuer/example?help=1`; - } - - @attr('boolean') isDefault; - @attr('string') issuerName; - - // Form Fields not hidden in toggle options - _attributeMeta = null; - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, [ - 'name', - 'leafNotAfterBehavior', - 'usage', - 'manualChain', - 'issuingCertifications', - 'crlDistributionPoints', - 'ocspServers', - 'deltaCrlUrls', // new endpoint, mentioned in RFC, but need to confirm it's there. - ]); - } - return this._attributeMeta; - } -} diff --git a/ui/app/models/pki/pki-key-engine.js b/ui/app/models/pki/pki-key-engine.js deleted file mode 100644 index 150680f94949b..0000000000000 --- a/ui/app/models/pki/pki-key-engine.js +++ /dev/null @@ -1,10 +0,0 @@ -import Model, { attr } from '@ember-data/model'; - -export default class PkiKeyEngineModel extends Model { - @attr('string', { readOnly: true }) backend; - @attr('boolean') isDefault; - @attr('string') keyRef; // reference to an existing key: either, vault generate identifier, literal string 'default', or the name assigned to the key. Part of the request URL. 
- @attr('string') keyId; - @attr('string') keyName; - @attr('string') keyType; -} diff --git a/ui/app/models/pki/pki-role-engine.js b/ui/app/models/pki/pki-role-engine.js deleted file mode 100644 index 05ad0c8fd98aa..0000000000000 --- a/ui/app/models/pki/pki-role-engine.js +++ /dev/null @@ -1,123 +0,0 @@ -import Model, { attr } from '@ember-data/model'; -import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; -import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; -import { withModelValidations } from 'vault/decorators/model-validations'; - -import fieldToAttrs from 'vault/utils/field-to-attrs'; - -const validations = { - name: [ - { type: 'presence', message: 'Name is required.' }, - { - type: 'containsWhiteSpace', - message: 'Name cannot contain whitespace.', - }, - ], -}; - -@withModelValidations(validations) -export default class PkiRoleEngineModel extends Model { - @attr('string', { readOnly: true }) backend; - @attr('string', { - label: 'Role name', - fieldValue: 'id', - readOnly: true, - }) - name; - - // must be a getter so it can be added to the prototype needed in the pathHelp service on the line here: if (newModel.merged || modelProto.useOpenAPI !== true) { - get useOpenAPI() { - return true; - } - getHelpUrl(backend) { - return `/v1/${backend}/roles/example?help=1`; - } - @lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id') updatePath; - get canDelete() { - return this.updatePath.get('canCreate'); - } - get canEdit() { - return this.updatePath.get('canEdit'); - } - get canRead() { - return this.updatePath.get('canRead'); - } - - @lazyCapabilities(apiPath`${'backend'}/issue/${'id'}`, 'backend', 'id') generatePath; - get canReadIssue() { - // ARG TODO was duplicate name, added Issue - return this.generatePath.get('canUpdate'); - } - @lazyCapabilities(apiPath`${'backend'}/sign/${'id'}`, 'backend', 'id') signPath; - get canSign() { - return this.signPath.get('canUpdate'); - } - 
@lazyCapabilities(apiPath`${'backend'}/sign-verbatim/${'id'}`, 'backend', 'id') signVerbatimPath; - get canSignVerbatim() { - return this.signVerbatimPath.get('canUpdate'); - } - - // Form Fields not hidden in toggle options - _attributeMeta = null; - get formFields() { - if (!this._attributeMeta) { - this._attributeMeta = expandAttributeMeta(this, ['name', 'clientType', 'redirectUris']); - } - return this._attributeMeta; - } - - // Form fields hidden behind toggle options - _fieldToAttrsGroups = null; - // ARG TODO: I removed 'allowedDomains' but I'm fairly certain it needs to be somewhere. Confirm with design. - get fieldGroups() { - if (!this._fieldToAttrsGroups) { - this._fieldToAttrsGroups = fieldToAttrs(this, [ - { default: ['name'] }, - { - 'Domain handling': [ - 'allowedDomains', - 'allowedDomainTemplate', - 'allowBareDomains', - 'allowSubdomains', - 'allowGlobDomains', - 'allowWildcardCertificates', - 'allowLocalhost', - 'allowAnyName', - 'enforceHostnames', - ], - }, - { - 'Key parameters': ['keyType', 'keyBits', 'signatureBits'], - }, - { - 'Key usage': [ - 'DigitalSignature', // ARG TODO: capitalized in the docs, but should confirm - 'KeyAgreement', - 'KeyEncipherment', - 'extKeyUsage', // ARG TODO: takes a list, but we have these as checkboxes from the options on the golang site: https://pkg.go.dev/crypto/x509#ExtKeyUsage - ], - }, - { 'Policy identifiers': ['policyIdentifiers'] }, - { - 'Subject Alternative Name (SAN) Options': ['allowIpSans', 'allowedUriSans', 'allowedOtherSans'], - }, - { - 'Additional subject fields': [ - 'allowed_serial_numbers', - 'requireCn', - 'useCsrCommonName', - 'useCsrSans', - 'ou', - 'organization', - 'country', - 'locality', - 'province', - 'streetAddress', - 'postalCode', - ], - }, - ]); - } - return this._fieldToAttrsGroups; - } -} diff --git a/ui/app/models/pki/pki-role.js b/ui/app/models/role-pki.js similarity index 100% rename from ui/app/models/pki/pki-role.js rename to ui/app/models/role-pki.js diff --git 
a/ui/app/router.js b/ui/app/router.js index 4588691212963..3cd477670a0ff 100644 --- a/ui/app/router.js +++ b/ui/app/router.js @@ -13,7 +13,6 @@ Router.map(function () { this.route('oidc-provider', { path: '/identity/oidc/provider/:provider_name/authorize' }); this.route('oidc-callback', { path: '/auth/*auth_path/oidc/callback' }); this.route('auth'); - this.route('redirect'); this.route('init'); this.route('logout'); this.mount('open-api-explorer', { path: '/api-explorer' }); @@ -109,54 +108,11 @@ Router.map(function () { this.route('index', { path: '/' }); this.route('create'); }); - this.route('oidc', function () { - this.route('clients', function () { - this.route('create'); - this.route('client', { path: '/:name' }, function () { - this.route('details'); - this.route('providers'); - this.route('edit'); - }); - }); - this.route('keys', function () { - this.route('create'); - this.route('key', { path: '/:name' }, function () { - this.route('details'); - this.route('clients'); - this.route('edit'); - }); - }); - this.route('assignments', function () { - this.route('create'); - this.route('assignment', { path: '/:name' }, function () { - this.route('details'); - this.route('edit'); - }); - }); - this.route('providers', function () { - this.route('create'); - this.route('provider', { path: '/:name' }, function () { - this.route('details'); - this.route('clients'); - this.route('edit'); - }); - }); - this.route('scopes', function () { - this.route('create'); - this.route('scope', { path: '/:name' }, function () { - this.route('details'); - this.route('edit'); - }); - }); - }); }); this.route('secrets', function () { this.route('backends', { path: '/' }); this.route('backend', { path: '/:backend' }, function () { this.mount('kmip'); - if (config.environment !== 'production') { - this.mount('pki'); - } this.route('index', { path: '/' }); this.route('configuration'); // because globs / params can't be empty, diff --git a/ui/app/routes/vault/cluster.js 
b/ui/app/routes/vault/cluster.js index 327bfc99fc99c..9f80e82e7d911 100644 --- a/ui/app/routes/vault/cluster.js +++ b/ui/app/routes/vault/cluster.js @@ -72,9 +72,7 @@ export default Route.extend(ModelBoundaryRoute, ClusterRoute, { const id = this.getClusterId(params); if (id) { this.auth.setCluster(id); - if (this.auth.currentToken) { - await this.permissions.getPaths.perform(); - } + await this.permissions.getPaths.perform(); return this.version.fetchFeatures(); } else { return reject({ httpStatus: 404, message: 'not found', path: params.cluster_name }); diff --git a/ui/app/routes/vault/cluster/access/oidc.js b/ui/app/routes/vault/cluster/access/oidc.js deleted file mode 100644 index 18903d49eb9db..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcConfigureRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js deleted file mode 100644 index 54011c1df6e2f..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcAssignmentRoute extends Route { - model({ name }) { - return this.store.findRecord('oidc/assignment', name); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js deleted file mode 100644 index bdc58a72308e0..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcAssignmentDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js 
b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js deleted file mode 100644 index e4dd299a21694..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcAssignmentEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/create.js b/ui/app/routes/vault/cluster/access/oidc/assignments/create.js deleted file mode 100644 index 414ae467b52f5..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/assignments/create.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcAssignmentsCreateRoute extends Route { - model() { - return this.store.createRecord('oidc/assignment'); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/index.js b/ui/app/routes/vault/cluster/access/oidc/assignments/index.js deleted file mode 100644 index 3956673c517de..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/assignments/index.js +++ /dev/null @@ -1,13 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcAssignmentsRoute extends Route { - model() { - return this.store.query('oidc/assignment', {}).catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client.js b/ui/app/routes/vault/cluster/access/oidc/clients/client.js deleted file mode 100644 index f2363a6e31ef6..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/client.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcClientRoute extends Route { - model({ name }) { - return this.store.findRecord('oidc/client', name); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js 
b/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js deleted file mode 100644 index db38b60589a98..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js +++ /dev/null @@ -1,2 +0,0 @@ -import Route from '@ember/routing/route'; -export default class OidcClientDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js b/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js deleted file mode 100644 index fd4e7a5bb8cfc..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcClientEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js b/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js deleted file mode 100644 index 653a50fabc135..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js +++ /dev/null @@ -1,18 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcClientProvidersRoute extends Route { - model() { - const model = this.modelFor('vault.cluster.access.oidc.clients.client'); - return this.store - .query('oidc/provider', { - allowed_client_id: model.clientId, - }) - .catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/create.js b/ui/app/routes/vault/cluster/access/oidc/clients/create.js deleted file mode 100644 index ba86744710930..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/create.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcClientsCreateRoute extends Route { - model() { - return this.store.createRecord('oidc/client'); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/index.js 
b/ui/app/routes/vault/cluster/access/oidc/clients/index.js deleted file mode 100644 index 08ec2bef288e3..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/clients/index.js +++ /dev/null @@ -1,21 +0,0 @@ -import Route from '@ember/routing/route'; -import { inject as service } from '@ember/service'; -export default class OidcClientsRoute extends Route { - @service router; - - model() { - return this.store.query('oidc/client', {}).catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } - - afterModel(model) { - if (model.length === 0) { - this.router.transitionTo('vault.cluster.access.oidc'); - } - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/index.js b/ui/app/routes/vault/cluster/access/oidc/index.js deleted file mode 100644 index f7262a7e9fa1d..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/index.js +++ /dev/null @@ -1,18 +0,0 @@ -import Route from '@ember/routing/route'; -import { inject as service } from '@ember/service'; - -export default class OidcConfigureRoute extends Route { - @service router; - - beforeModel() { - return this.store - .query('oidc/client', {}) - .then(() => { - // transition to client list view if clients have been created - this.router.transitionTo('vault.cluster.access.oidc.clients'); - }) - .catch(() => { - // adapter throws error for 404 - swallow and remain on index route to show call to action - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/create.js b/ui/app/routes/vault/cluster/access/oidc/keys/create.js deleted file mode 100644 index 424e2806c358c..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/create.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcKeysCreateRoute extends Route { - model() { - return this.store.createRecord('oidc/key'); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/index.js 
b/ui/app/routes/vault/cluster/access/oidc/keys/index.js deleted file mode 100644 index 5e9b48be99f3a..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/index.js +++ /dev/null @@ -1,12 +0,0 @@ -import Route from '@ember/routing/route'; -export default class OidcKeysRoute extends Route { - model() { - return this.store.query('oidc/key', {}).catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key.js b/ui/app/routes/vault/cluster/access/oidc/keys/key.js deleted file mode 100644 index d537e62c30049..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/key.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcKeyRoute extends Route { - model({ name }) { - return this.store.findRecord('oidc/key', name); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js b/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js deleted file mode 100644 index a96aba0ae9d4e..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js +++ /dev/null @@ -1,8 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcKeyClientsRoute extends Route { - async model() { - const { allowedClientIds } = this.modelFor('vault.cluster.access.oidc.keys.key'); - return await this.store.query('oidc/client', { paramKey: 'client_id', filterFor: allowedClientIds }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js b/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js deleted file mode 100644 index df05a168c427a..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcKeyDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js 
b/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js deleted file mode 100644 index c86f197c3f2b9..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcKeyEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/create.js b/ui/app/routes/vault/cluster/access/oidc/providers/create.js deleted file mode 100644 index 3a6b9667b567a..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/create.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProvidersCreateRoute extends Route { - model() { - return this.store.createRecord('oidc/provider'); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/index.js b/ui/app/routes/vault/cluster/access/oidc/providers/index.js deleted file mode 100644 index 82250f19dee93..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/index.js +++ /dev/null @@ -1,13 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProvidersRoute extends Route { - model() { - return this.store.query('oidc/provider', {}).catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider.js deleted file mode 100644 index 66a8fadaae6c9..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/provider.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProviderRoute extends Route { - model({ name }) { - return this.store.findRecord('oidc/provider', name); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js deleted file mode 100644 
index 586a73c13e389..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js +++ /dev/null @@ -1,8 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProviderClientsRoute extends Route { - async model() { - const { allowedClientIds } = this.modelFor('vault.cluster.access.oidc.providers.provider'); - return await this.store.query('oidc/client', { paramKey: 'client_id', filterFor: allowedClientIds }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js deleted file mode 100644 index af4d1077d2d1e..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProviderDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js deleted file mode 100644 index 365b2328e2a0e..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcProviderEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/create.js b/ui/app/routes/vault/cluster/access/oidc/scopes/create.js deleted file mode 100644 index cd1862279f466..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/scopes/create.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcScopesCreateRoute extends Route { - model() { - return this.store.createRecord('oidc/scope'); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/index.js b/ui/app/routes/vault/cluster/access/oidc/scopes/index.js deleted file mode 100644 index e8108980ad5dd..0000000000000 --- 
a/ui/app/routes/vault/cluster/access/oidc/scopes/index.js +++ /dev/null @@ -1,13 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcScopesRoute extends Route { - model() { - return this.store.query('oidc/scope', {}).catch((err) => { - if (err.httpStatus === 404) { - return []; - } else { - throw err; - } - }); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js deleted file mode 100644 index 7a4c5c08dcc6c..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js +++ /dev/null @@ -1,7 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcScopeRoute extends Route { - model({ name }) { - return this.store.findRecord('oidc/scope', name); - } -} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js deleted file mode 100644 index 6047afcfc3c04..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcScopeDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js deleted file mode 100644 index fc8b61fc087e2..0000000000000 --- a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js +++ /dev/null @@ -1,3 +0,0 @@ -import Route from '@ember/routing/route'; - -export default class OidcScopeEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/oidc-callback.js b/ui/app/routes/vault/cluster/oidc-callback.js index 2e9e752974592..ccdaf93b0f3be 100644 --- a/ui/app/routes/vault/cluster/oidc-callback.js +++ b/ui/app/routes/vault/cluster/oidc-callback.js @@ -14,7 +14,7 @@ export default Route.extend({ } path = window.decodeURIComponent(path); const source = 'oidc-callback'; // required by 
event listener in auth-jwt component - let queryParams = { source, path: path || '', code: code || '', state: state || '' }; + let queryParams = { source, path, code, state }; if (namespace) { queryParams.namespace = namespace; } diff --git a/ui/app/routes/vault/cluster/redirect.js b/ui/app/routes/vault/cluster/redirect.js deleted file mode 100644 index 2f58ddfd2e403..0000000000000 --- a/ui/app/routes/vault/cluster/redirect.js +++ /dev/null @@ -1,30 +0,0 @@ -import Route from '@ember/routing/route'; -import { inject as service } from '@ember/service'; -import { AUTH, CLUSTER } from 'vault/lib/route-paths'; - -export default class VaultClusterRedirectRoute extends Route { - @service auth; - @service router; - - beforeModel({ to: { queryParams } }) { - let transition; - const isAuthed = this.auth.currentToken; - // eslint-disable-next-line ember/no-controller-access-in-routes - const controller = this.controllerFor('vault'); - const { redirect_to, ...otherParams } = queryParams; - - if (isAuthed && redirect_to) { - // if authenticated and redirect exists, redirect to that place and strip other params - transition = this.router.replaceWith(redirect_to); - } else if (isAuthed) { - // if authed no redirect, go to cluster - transition = this.router.replaceWith(CLUSTER, { queryParams: otherParams }); - } else { - // default go to Auth - transition = this.router.replaceWith(AUTH, { queryParams: otherParams }); - } - transition.followRedirects().then(() => { - controller.set('redirectTo', ''); - }); - } -} diff --git a/ui/app/routes/vault/cluster/secrets/backend/list.js b/ui/app/routes/vault/cluster/secrets/backend/list.js index dd5fbfd3500a9..019d4b3ad3b6d 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/list.js +++ b/ui/app/routes/vault/cluster/secrets/backend/list.js @@ -80,7 +80,7 @@ export default Route.extend({ ssh: 'role-ssh', transform: this.modelTypeForTransform(tab), aws: 'role-aws', - pki: `pki/${tab || 'pki-role'}`, + pki: tab === 'certs' ? 
'pki-certificate' : 'role-pki', // secret or secret-v2 cubbyhole: 'secret', kv: secretEngine.get('modelTypeForKV'), @@ -130,7 +130,7 @@ export default Route.extend({ afterModel(model) { const { tab } = this.paramsFor(this.routeName); const backend = this.enginePathParam(); - if (!tab || tab !== 'cert') { + if (!tab || tab !== 'certs') { return; } return all( @@ -138,7 +138,7 @@ export default Route.extend({ // possible that there is no certificate for them in order to know, // we fetch them specifically on the list page, and then unload the // records if there is no `certificate` attribute on the resultant model - ['ca', 'crl', 'ca_chain'].map((id) => this.store.queryRecord('pki/cert', { id, backend })) + ['ca', 'crl', 'ca_chain'].map((id) => this.store.queryRecord('pki-certificate', { id, backend })) ).then( (results) => { results.rejectBy('certificate').forEach((record) => record.unloadRecord()); diff --git a/ui/app/routes/vault/cluster/secrets/backend/overview.js b/ui/app/routes/vault/cluster/secrets/backend/overview.js index 7ba2e38c865dc..3952708d1ab73 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/overview.js +++ b/ui/app/routes/vault/cluster/secrets/backend/overview.js @@ -54,7 +54,6 @@ export default Route.extend({ roleCapabilities, staticRoleCapabilities, connectionCapabilities, - icon: 'database', }); }, setupController(controller, model) { diff --git a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js index 3b6b796fe3b4d..1ccc5bfdbd5de 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js +++ b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js @@ -101,7 +101,7 @@ export default Route.extend(UnloadModelRoute, { ssh: 'role-ssh', transform: this.modelTypeForTransform(secret), aws: 'role-aws', - pki: secret && secret.startsWith('cert/') ? 'pki/cert' : 'pki/pki-role', + pki: secret && secret.startsWith('cert/') ? 
'pki-certificate' : 'role-pki', cubbyhole: 'secret', kv: backendModel.get('modelTypeForKV'), keymgmt: `keymgmt/${options.queryParams?.itemType || 'key'}`, @@ -230,7 +230,7 @@ export default Route.extend(UnloadModelRoute, { if (!secret) { secret = '\u0020'; } - if (modelType === 'pki/cert') { + if (modelType === 'pki-certificate') { secret = secret.replace('cert/', ''); } if (modelType.startsWith('transform/')) { diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/section.js b/ui/app/routes/vault/cluster/settings/auth/configure/section.js index a4e54a471aeab..f4495cd06d693 100644 --- a/ui/app/routes/vault/cluster/settings/auth/configure/section.js +++ b/ui/app/routes/vault/cluster/settings/auth/configure/section.js @@ -10,10 +10,11 @@ export default Route.extend(UnloadModelRoute, { pathHelp: service('path-help'), modelType(backendType, section) { + // TODO: Update endpoints from PR#10997 const MODELS = { 'aws-client': 'auth-config/aws/client', - 'aws-identity-accesslist': 'auth-config/aws/identity-accesslist', - 'aws-roletag-denylist': 'auth-config/aws/roletag-denylist', + 'aws-identity-whitelist': 'auth-config/aws/identity-whitelist', + 'aws-roletag-blacklist': 'auth-config/aws/roletag-blacklist', 'azure-configuration': 'auth-config/azure', 'github-configuration': 'auth-config/github', 'gcp-configuration': 'auth-config/gcp', diff --git a/ui/app/routes/vault/cluster/settings/configure-secret-backend/section.js b/ui/app/routes/vault/cluster/settings/configure-secret-backend/section.js index ffb1e8d6a2c80..b4aa5da1a2856 100644 --- a/ui/app/routes/vault/cluster/settings/configure-secret-backend/section.js +++ b/ui/app/routes/vault/cluster/settings/configure-secret-backend/section.js @@ -2,7 +2,6 @@ import AdapterError from '@ember-data/adapter/error'; import { set } from '@ember/object'; import Route from '@ember/routing/route'; -// ARG TODO glimmerize const SECTIONS_FOR_TYPE = { pki: ['cert', 'urls', 'crl', 'tidy'], }; @@ -10,21 +9,14 @@ export default 
Route.extend({ fetchModel() { const { section_name: sectionName } = this.paramsFor(this.routeName); const backendModel = this.modelFor('vault.cluster.settings.configure-secret-backend'); - const type = backendModel.get('type'); - let modelType; - if (type === 'pki') { - // pki models are in models/pki - modelType = `${type}/${type}-config`; - } else { - modelType = `${type}-config`; - } + const modelType = `${backendModel.get('type')}-config`; return this.store .queryRecord(modelType, { backend: backendModel.id, section: sectionName, }) .then((model) => { - model.set('backendType', type); + model.set('backendType', backendModel.get('type')); model.set('section', sectionName); return model; }); diff --git a/ui/app/serializers/oidc/assignment.js b/ui/app/serializers/oidc/assignment.js deleted file mode 100644 index 1a7693d6c99c1..0000000000000 --- a/ui/app/serializers/oidc/assignment.js +++ /dev/null @@ -1,5 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class OidcAssignmentSerializer extends ApplicationSerializer { - primaryKey = 'name'; -} diff --git a/ui/app/serializers/oidc/client.js b/ui/app/serializers/oidc/client.js deleted file mode 100644 index 784d56cc852b1..0000000000000 --- a/ui/app/serializers/oidc/client.js +++ /dev/null @@ -1,17 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class OidcClientSerializer extends ApplicationSerializer { - primaryKey = 'name'; - - // rehydrate each client model so all model attributes are accessible from the LIST response - normalizeItems(payload) { - if (payload.data) { - if (payload.data?.keys && Array.isArray(payload.data.keys)) { - return payload.data.keys.map((key) => ({ name: key, ...payload.data.key_info[key] })); - } - Object.assign(payload, payload.data); - delete payload.data; - } - return payload; - } -} diff --git a/ui/app/serializers/oidc/key.js b/ui/app/serializers/oidc/key.js deleted file mode 100644 index bf2890f4004b4..0000000000000 --- 
a/ui/app/serializers/oidc/key.js +++ /dev/null @@ -1,5 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class OidcKeySerializer extends ApplicationSerializer { - primaryKey = 'name'; -} diff --git a/ui/app/serializers/oidc/provider.js b/ui/app/serializers/oidc/provider.js deleted file mode 100644 index da0b35e2945a1..0000000000000 --- a/ui/app/serializers/oidc/provider.js +++ /dev/null @@ -1,17 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class OidcProviderSerializer extends ApplicationSerializer { - primaryKey = 'name'; - - // need to normalize to get issuer metadata for provider's list view - normalizeItems(payload) { - if (payload.data) { - if (payload.data?.keys && Array.isArray(payload.data.keys)) { - return payload.data.keys.map((key) => ({ name: key, ...payload.data.key_info[key] })); - } - Object.assign(payload, payload.data); - delete payload.data; - } - return payload; - } -} diff --git a/ui/app/serializers/oidc/scope.js b/ui/app/serializers/oidc/scope.js deleted file mode 100644 index 17c7c02029523..0000000000000 --- a/ui/app/serializers/oidc/scope.js +++ /dev/null @@ -1,5 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class OidcScopeSerializer extends ApplicationSerializer { - primaryKey = 'name'; -} diff --git a/ui/app/serializers/pki/cert.js b/ui/app/serializers/pki-certificate.js similarity index 97% rename from ui/app/serializers/pki/cert.js rename to ui/app/serializers/pki-certificate.js index 3a91a3160599b..8991e82f91a41 100644 --- a/ui/app/serializers/pki/cert.js +++ b/ui/app/serializers/pki-certificate.js @@ -2,7 +2,7 @@ import RESTSerializer from '@ember-data/serializer/rest'; import { isNone, isBlank } from '@ember/utils'; import { assign } from '@ember/polyfills'; import { decamelize } from '@ember/string'; -import { parsePkiCert } from '../../helpers/parse-pki-cert'; +import { parsePkiCert } from '../helpers/parse-pki-cert'; export default 
RESTSerializer.extend({ keyForAttribute: function (attr) { diff --git a/ui/app/serializers/pki/pki-config.js b/ui/app/serializers/pki-config.js similarity index 100% rename from ui/app/serializers/pki/pki-config.js rename to ui/app/serializers/pki-config.js diff --git a/ui/app/serializers/pki/pki-certificate-engine.js b/ui/app/serializers/pki/pki-certificate-engine.js deleted file mode 100644 index b8caf1b9e095d..0000000000000 --- a/ui/app/serializers/pki/pki-certificate-engine.js +++ /dev/null @@ -1,3 +0,0 @@ -import CertSerializer from './cert'; - -export default class PkiCertificateEngineSerializer extends CertSerializer {} diff --git a/ui/app/serializers/pki/pki-issuer-engine.js b/ui/app/serializers/pki/pki-issuer-engine.js deleted file mode 100644 index 545b742b17b20..0000000000000 --- a/ui/app/serializers/pki/pki-issuer-engine.js +++ /dev/null @@ -1,15 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class PkiIssuerEngineSerializer extends ApplicationSerializer { - // rehydrate each issuer model so all model attributes are accessible from the LIST response - normalizeItems(payload) { - if (payload.data) { - if (payload.data?.keys && Array.isArray(payload.data.keys)) { - return payload.data.keys.map((key) => ({ id: key, ...payload.data.key_info[key] })); - } - Object.assign(payload, payload.data); - delete payload.data; - } - return payload; - } -} diff --git a/ui/app/serializers/pki/pki-key-engine.js b/ui/app/serializers/pki/pki-key-engine.js deleted file mode 100644 index 7b9911594585d..0000000000000 --- a/ui/app/serializers/pki/pki-key-engine.js +++ /dev/null @@ -1,15 +0,0 @@ -import ApplicationSerializer from '../application'; - -export default class PkiKeyEngineSerializer extends ApplicationSerializer { - // rehydrate each keys model so all model attributes are accessible from the LIST response - normalizeItems(payload) { - if (payload.data) { - if (payload.data?.keys && Array.isArray(payload.data.keys)) { - return 
payload.data.keys.map((key) => ({ id: key, ...payload.data.key_info[key] })); - } - Object.assign(payload, payload.data); - delete payload.data; - } - return payload; - } -} diff --git a/ui/app/serializers/pki/pki-role-engine.js b/ui/app/serializers/pki/pki-role-engine.js deleted file mode 100644 index 47d752bf39bd3..0000000000000 --- a/ui/app/serializers/pki/pki-role-engine.js +++ /dev/null @@ -1,3 +0,0 @@ -import RoleSerializer from '../role'; - -export default class PkiRoleEngineSerializer extends RoleSerializer {} diff --git a/ui/app/serializers/pki/pki-role.js b/ui/app/serializers/role-pki.js similarity index 51% rename from ui/app/serializers/pki/pki-role.js rename to ui/app/serializers/role-pki.js index 9366ee82b1dbe..d36af6b2f38b9 100644 --- a/ui/app/serializers/pki/pki-role.js +++ b/ui/app/serializers/role-pki.js @@ -1,3 +1,3 @@ -import RoleSerializer from '../role'; +import RoleSerializer from './role'; export default RoleSerializer.extend(); diff --git a/ui/app/services/auth.js b/ui/app/services/auth.js index 6134ac014ea4b..95cf6b88dc36e 100644 --- a/ui/app/services/auth.js +++ b/ui/app/services/auth.js @@ -441,21 +441,4 @@ export default Service.extend({ backend: BACKENDS.findBy('type', backend), }); }), - - getOktaNumberChallengeAnswer(nonce, mount) { - const url = `/v1/auth/${mount}/verify/${nonce}`; - return this.ajax(url, 'GET', {}).then( - (resp) => { - return resp.data.correct_answer; - }, - (e) => { - // if error status is 404, return and keep polling for a response - if (e.status === 404) { - return null; - } else { - throw e; - } - } - ); - }, }); diff --git a/ui/app/services/permissions.js b/ui/app/services/permissions.js index a8d50b953cfc2..6364ab445de67 100644 --- a/ui/app/services/permissions.js +++ b/ui/app/services/permissions.js @@ -5,7 +5,6 @@ const API_PATHS = { access: { methods: 'sys/auth', mfa: 'identity/mfa/method', - oidc: 'identity/oidc/client', entities: 'identity/entity/id', groups: 'identity/group/id', leases: 
'sys/leases/lookup', @@ -45,7 +44,6 @@ const API_PATHS_TO_ROUTE_PARAMS = { 'sys/namespaces': { route: 'vault.cluster.access.namespaces', models: [] }, 'sys/control-group/': { route: 'vault.cluster.access.control-groups', models: [] }, 'identity/mfa/method': { route: 'vault.cluster.access.mfa', models: [] }, - 'identity/oidc/client': { route: 'vault.cluster.access.oidc', models: [] }, }; /* diff --git a/ui/app/styles/components/list-item-row.scss b/ui/app/styles/components/list-item-row.scss index 9bdc702f3e9b9..75cad84fb957b 100644 --- a/ui/app/styles/components/list-item-row.scss +++ b/ui/app/styles/components/list-item-row.scss @@ -22,10 +22,6 @@ margin-left: auto; margin-right: auto; } - - &.is-disabled { - opacity: 0.5; - } } a.list-item-row, diff --git a/ui/app/styles/components/radio-card.scss b/ui/app/styles/components/radio-card.scss index e98020eb68008..f65e21e6f1299 100644 --- a/ui/app/styles/components/radio-card.scss +++ b/ui/app/styles/components/radio-card.scss @@ -3,15 +3,16 @@ margin-bottom: $spacing-xs; } .radio-card { + width: 19rem; box-shadow: $box-shadow-low; - flex: 1 1 25%; + display: flex; flex-direction: column; justify-content: space-between; margin: $spacing-xs $spacing-m; border: $base-border; border-radius: $radius; transition: all ease-in-out $speed; - max-width: 60%; + input[type='radio'] { position: absolute; z-index: 1; diff --git a/ui/app/styles/core/buttons.scss b/ui/app/styles/core/buttons.scss index 9ae0a2e844c95..031ee33e980f9 100644 --- a/ui/app/styles/core/buttons.scss +++ b/ui/app/styles/core/buttons.scss @@ -272,13 +272,3 @@ a.button.disabled { border: none; cursor: pointer; } -.text-button { - padding: unset; - border: none; - background-color: inherit; - color: inherit; - font-size: inherit; - font-weight: inherit; - cursor: pointer; - color: $link; -} diff --git a/ui/app/styles/core/forms.scss b/ui/app/styles/core/forms.scss index ef41a3eddb63e..55d4350df0f40 100644 --- a/ui/app/styles/core/forms.scss +++ 
b/ui/app/styles/core/forms.scss @@ -331,11 +331,6 @@ select.has-error-border { border: 1px solid $red-500; } -.dropdown-has-error-border > div.ember-basic-dropdown-trigger { - border: 1px solid $red-500; -} - - .autocomplete-input { background: $white !important; border: 1px solid $grey-light; diff --git a/ui/app/styles/core/helpers.scss b/ui/app/styles/core/helpers.scss index 88b741088761b..72c0423ebeddf 100644 --- a/ui/app/styles/core/helpers.scss +++ b/ui/app/styles/core/helpers.scss @@ -174,9 +174,6 @@ .has-top-padding-l { padding-top: $spacing-l; } -.has-top-padding-xxl { - padding-top: $spacing-xxl; -} .has-bottom-margin-xs { margin-bottom: $spacing-xs; } diff --git a/ui/app/styles/core/navbar.scss b/ui/app/styles/core/navbar.scss index 48d9cc7d1f723..a981fcf12b104 100644 --- a/ui/app/styles/core/navbar.scss +++ b/ui/app/styles/core/navbar.scss @@ -1,45 +1,13 @@ .navbar { - left: 0; - position: fixed; - right: 0; - top: 0; - @include from($mobile) { - display: block; - } -} - -.navbar-status { - height: 40px; - display: flex; - justify-content: center; - align-items: center; - font-size: $size-7; - font-weight: $font-weight-semibold; - - &.connected { - background-color: $ui-gray-800; - color: #c2c5cb; - - a { - color: #c2c5cb; - } - } - &.warning { - background-color: #fcf6ea; - color: #975b06; - - a { - color: #975b06; - } - } -} - -.navbar-actions { background-color: $black; display: flex; height: $header-height; justify-content: flex-start; + left: 0; padding: $spacing-xs $spacing-s $spacing-xs 0; + position: fixed; + right: 0; + top: 0; } .navbar-brand { diff --git a/ui/app/templates/components/alert-popup.hbs b/ui/app/templates/components/alert-popup.hbs index c95751ff243fd..2a08deced4b05 100644 --- a/ui/app/templates/components/alert-popup.hbs +++ b/ui/app/templates/components/alert-popup.hbs @@ -4,7 +4,7 @@