diff --git a/.circleci/config.yml b/.circleci/config.yml
index faafe343e4643..a842337bee0e2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -916,13 +916,13 @@ jobs:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -933,6 +933,16 @@ jobs:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -952,7 +962,7 @@ jobs:
-e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \
-e VAULT_TEST_LOG_DIR=/tmp/testlogs \
--network vaulttest --name \
- testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \
+ testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \
tail -f /dev/null
# Run tests
@@ -963,6 +973,7 @@ jobs:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -974,7 +985,8 @@ jobs:
\
${package_names}
else
- GOCACHE=/tmp/go-cache \
+ GOARCH=amd64 \
+ GOCACHE=/tmp/go-cache \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
@@ -1131,13 +1143,13 @@ jobs:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -1148,6 +1160,16 @@ jobs:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -1167,7 +1189,7 @@ jobs:
-e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \
-e VAULT_TEST_LOG_DIR=/tmp/testlogs \
--network vaulttest --name \
- testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \
+ testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \
tail -f /dev/null
# Run tests
@@ -1178,6 +1200,7 @@ jobs:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -1189,7 +1212,8 @@ jobs:
-race \
${package_names}
else
- GOCACHE=/tmp/go-cache \
+ GOARCH=amd64 \
+ GOCACHE=/tmp/go-cache \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
@@ -1597,13 +1621,13 @@ jobs:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -1614,6 +1638,16 @@ jobs:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -1633,7 +1667,7 @@ jobs:
-e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \
-e VAULT_TEST_LOG_DIR=/tmp/testlogs \
--network vaulttest --name \
- testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \
+ testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \
tail -f /dev/null
# Run tests
@@ -1644,6 +1678,7 @@ jobs:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -1655,7 +1690,8 @@ jobs:
\
${package_names}
else
- GOCACHE=/tmp/go-cache \
+ GOARCH=amd64 \
+ GOCACHE=/tmp/go-cache \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
@@ -1743,13 +1779,13 @@ jobs:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -1760,6 +1796,16 @@ jobs:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -1779,7 +1825,7 @@ jobs:
-e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \
-e VAULT_TEST_LOG_DIR=/tmp/testlogs \
--network vaulttest --name \
- testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \
+ testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \
tail -f /dev/null
# Run tests
@@ -1790,6 +1836,7 @@ jobs:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -1801,7 +1848,8 @@ jobs:
\
${package_names}
else
- GOCACHE=/tmp/go-cache \
+ GOARCH=amd64 \
+ GOCACHE=/tmp/go-cache \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
@@ -2340,13 +2388,13 @@ jobs:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -2357,6 +2405,16 @@ jobs:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -2376,7 +2434,7 @@ jobs:
-e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \
-e VAULT_TEST_LOG_DIR=/tmp/testlogs \
--network vaulttest --name \
- testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \
+ testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \
tail -f /dev/null
# Run tests
@@ -2387,6 +2445,7 @@ jobs:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -2398,7 +2457,8 @@ jobs:
-race \
${package_names}
else
- GOCACHE=/tmp/go-cache \
+ GOARCH=amd64 \
+ GOCACHE=/tmp/go-cache \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml
index b8146bd3396e5..afa9c049c8cba 100644
--- a/.circleci/config/commands/go_test.yml
+++ b/.circleci/config/commands/go_test.yml
@@ -14,10 +14,14 @@ parameters:
default: false
go_image:
type: string
- default: "docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster"
+ default: "docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster"
use_docker:
type: boolean
default: false
+ arch:
+ type: string
+ # Only supported for use_docker=false, and only other value allowed is 386
+ default: amd64
steps:
- run:
name: Compute test cache key
@@ -55,13 +59,13 @@ steps:
jq -r 'select(.Deps != null) |
select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
else
package_names=$(go list -test -json ./... |
jq -r 'select(.Deps != null) |
select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) |
.ForTest | select(. != null)' |
- sort -u | circleci tests split --split-by=timings --timings-type=classname)
+ sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname)
fi
# After running tests split step, we are now running the following steps
@@ -72,6 +76,16 @@ steps:
make prep
mkdir -p test-results/go-test
+ # We don't want VAULT_LICENSE set when running Go tests, because that's
+ # not what developers have in their environments and it could break some
+ # tests; it would be like setting VAULT_TOKEN. However some non-Go
+ # CI commands, like the UI tests, shouldn't have to worry about licensing.
+ # So we set VAULT_LICENSE in CI, and here we unset it. Instead of
+ # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want
+ # an externally supplied license can opt-in to using it.
+ export VAULT_LICENSE_CI="$VAULT_LICENSE"
+ VAULT_LICENSE=
+
# Create a docker network for our testcontainer
if [ $USE_DOCKER == 1 ]; then
# Despite the fact that we're using a circleci image (thus getting the
@@ -102,6 +116,7 @@ steps:
docker exec -w /go/src/github.com/hashicorp/vault/ \
-e GO111MODULE -e CIRCLECI -e GOCACHE=/tmp/gocache -e VAULT_CI_GO_TEST_RACE \
+ -e VAULT_LICENSE_CI \
testcontainer \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
@@ -113,7 +128,8 @@ steps:
<< parameters.extra_flags >> \
${package_names}
else
- GOCACHE=<< parameters.cache_dir >> \
+ GOARCH=<< parameters.arch >> \
+ GOCACHE=<< parameters.cache_dir >> \
gotestsum --format=short-verbose \
--junitfile test-results/go-test/results.xml \
--jsonfile test-results/go-test/results.json \
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 49d4114224aca..a779f02094a4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,129 @@
## 1.8.0 (Unreleased)
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+
+FEATURES:
+
+* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532](https://github.com/hashicorp/vault/pull/11532)]
+* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)]
+* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)]
+* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)]
+
+IMPROVEMENTS:
+
+* auth/aws: Underlying error included in validation failure message. [[GH-11638](https://github.com/hashicorp/vault/pull/11638)]
+* core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)]
+* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)]
+* core: Add metrics to report if a node is a perf standby, if a node is a dr
+secondary or primary, and if a node is a perf secondary or primary. Also allow
+DR secondaries to serve metrics requests when using unauthenticated_metrics_access. [[GH-1844](https://github.com/hashicorp/vault/pull/1844)]
+* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)]
+* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
+* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)]
+* replication (enterprise): The log shipper is now memory
+as well as length bound, and length and size can be
+separately configured.
+* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)]
+* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)]
+* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)]
+* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)]
+* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)]
+* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)]
+* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)]
+* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)]
+* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)]
+
+BUG FIXES:
+
+* agent/cert: Fix issue where the API client on agent was not honoring certificate
+information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)]
+* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)]
+* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes.
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)]
+* replication: Fix: mounts created within a namespace that was part of an Allow
+filtering rule would not appear on performance secondary if created after rule
+was defined. [[GH-1807](https://github.com/hashicorp/vault/pull/1807)]
+* secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)]
+* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)]
+* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)]
+* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
+* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)]
+* transform (enterprise): Fix an issue with malformed transform configuration
+storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x.
+* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)]
+* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)]
+* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)]
+* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
+* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)]
+* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)]
+* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
+
+## 1.7.2
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)]
+
+IMPROVEMENTS:
+
+* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)]
+* auth/aws: Underlying error included in validation failure message. [[GH-11638](https://github.com/hashicorp/vault/pull/11638)]
+* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)]
+* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)]
+* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+
+BUG FIXES:
+
+* agent/cert: Fix issue where the API client on agent was not honoring certificate
+information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)]
+* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)]
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)]
+* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
+* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)]
+* secrets/keymgmt (enterprise): Fixes audit logging for the read key response.
+* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)]
+* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)]
+* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)]
+
## 1.7.1
### 21 April 2021
@@ -40,7 +164,7 @@ BUG FIXES:
* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)]
* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)]
-* ui: Fix footer URL linking to the correct version changelog. [[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
* ui: Fix status menu no showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
@@ -184,6 +308,32 @@ the given key will be used to encrypt the snapshot using AWS KMS.
DEPRECATIONS:
* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated.
Refer to the CHANGES section for additional details.
+
+## 1.6.5
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)]
+
+BUG FIXES:
+
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)]
+* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
## 1.6.4
### 21 April 2021
@@ -415,6 +565,28 @@ BUG FIXES:
* ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)]
* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)]
+## 1.5.9
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)]
+
+BUG FIXES:
+
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+
## 1.5.8
### 21 April 2021
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000000000..a7b33012a378c
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,20 @@
+# Each line is a file pattern followed by one or more owners.
+# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
+
+
+/builtin/credential/ @hashicorp/vault-ecosystem
+
+# Secrets engines (pki, ssh, totp and transit omitted)
+/builtin/logical/aws/ @hashicorp/vault-ecosystem
+/builtin/logical/cassandra/ @hashicorp/vault-ecosystem
+/builtin/logical/consul/ @hashicorp/vault-ecosystem
+/builtin/logical/database/ @hashicorp/vault-ecosystem
+/builtin/logical/mongodb/ @hashicorp/vault-ecosystem
+/builtin/logical/mssql/ @hashicorp/vault-ecosystem
+/builtin/logical/mysql/ @hashicorp/vault-ecosystem
+/builtin/logical/nomad/ @hashicorp/vault-ecosystem
+/builtin/logical/postgresql/ @hashicorp/vault-ecosystem
+/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem
+
+/command/agent/ @hashicorp/vault-ecosystem
+/plugins/ @hashicorp/vault-ecosystem
diff --git a/Makefile b/Makefile
index 582c050fff6e9..85822c7c75b2f 100644
--- a/Makefile
+++ b/Makefile
@@ -53,10 +53,10 @@ dev-dynamic-mem: dev-dynamic
# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.
# The resulting image is tagged "vault:dev".
docker-dev: prep
- docker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile -t vault:dev .
+ docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev .
docker-dev-ui: prep
- docker build --build-arg VERSION=$(GO_VERSION_MIN) -f scripts/docker/Dockerfile.ui -t vault:dev-ui .
+ docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui .
# test runs the unit tests and vets the code
test: prep
diff --git a/api/client.go b/api/client.go
index ce5f7798b291b..8674f7efcbdc6 100644
--- a/api/client.go
+++ b/api/client.go
@@ -97,6 +97,9 @@ type Config struct {
// The CheckRetry function to use; a default is used if not provided
CheckRetry retryablehttp.CheckRetry
+ // Logger is the leveled logger to provide to the retryable HTTP client.
+ Logger retryablehttp.LeveledLogger
+
// Limiter is the rate limiter used by the client.
// If this pointer is nil, then there will be no limit set.
// In contrast, if this pointer is set, even to an empty struct,
@@ -477,6 +480,7 @@ func (c *Client) CloneConfig() *Config {
newConfig.Timeout = c.config.Timeout
newConfig.Backoff = c.config.Backoff
newConfig.CheckRetry = c.config.CheckRetry
+ newConfig.Logger = c.config.Logger
newConfig.Limiter = c.config.Limiter
newConfig.OutputCurlString = c.config.OutputCurlString
newConfig.SRVLookup = c.config.SRVLookup
@@ -738,6 +742,15 @@ func (c *Client) SetBackoff(backoff retryablehttp.Backoff) {
c.config.Backoff = backoff
}
+func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+
+ c.config.Logger = logger
+}
+
// Clone creates a new client with the same configuration. Note that the same
// underlying http.Client is used; modifying the client from more than one
// goroutine at once may not be safe, so modify the client as needed and then
@@ -761,6 +774,7 @@ func (c *Client) Clone() (*Client, error) {
Timeout: config.Timeout,
Backoff: config.Backoff,
CheckRetry: config.CheckRetry,
+ Logger: config.Logger,
Limiter: config.Limiter,
OutputCurlString: config.OutputCurlString,
AgentAddress: config.AgentAddress,
@@ -865,6 +879,7 @@ func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Respon
httpClient := c.config.HttpClient
timeout := c.config.Timeout
outputCurlString := c.config.OutputCurlString
+ logger := c.config.Logger
c.config.modifyLock.RUnlock()
c.modifyLock.RUnlock()
@@ -924,6 +939,7 @@ START:
RetryMax: maxRetries,
Backoff: backoff,
CheckRetry: checkRetry,
+ Logger: logger,
ErrorHandler: retryablehttp.PassthroughErrorHandler,
}
diff --git a/api/client_test.go b/api/client_test.go
index 87900856af24d..4fe356e1b85e4 100644
--- a/api/client_test.go
+++ b/api/client_test.go
@@ -13,6 +13,7 @@ import (
"testing"
"time"
+ "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/consts"
)
@@ -427,6 +428,8 @@ func TestClone(t *testing.T) {
}
client1.SetCheckRetry(checkRetry)
+ client1.SetLogger(hclog.NewNullLogger())
+
client1.SetLimiter(5.0, 10)
client1.SetMaxRetries(5)
client1.SetOutputCurlString(true)
diff --git a/api/go.mod b/api/go.mod
index 75bcfefe687ba..7e93c1c5684b8 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -9,6 +9,7 @@ require (
github.com/go-test/deep v1.0.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-cleanhttp v0.5.1
+ github.com/hashicorp/go-hclog v0.16.1
github.com/hashicorp/go-multierror v1.1.0
github.com/hashicorp/go-retryablehttp v0.6.6
github.com/hashicorp/go-rootcerts v1.0.2
diff --git a/api/go.sum b/api/go.sum
index 749ad0d191d33..b6e144536fed5 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -100,8 +100,9 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
+github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go
index a23f4c3642259..044f02d2a2a4c 100644
--- a/builtin/credential/approle/backend_test.go
+++ b/builtin/credential/approle/backend_test.go
@@ -5,10 +5,13 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"github.com/hashicorp/vault/sdk/logical"
)
func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+ t.Helper()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
@@ -26,6 +29,72 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
return b, config.StorageView
}
+func TestAppRole_RoleServiceToBatchNumUses(t *testing.T) {
+ b, s := createBackendWithStorage(t)
+
+ requestFunc := func(operation logical.Operation, data map[string]interface{}) {
+ resp, err := b.HandleRequest(context.Background(), &logical.Request{
+ Path: "role/testrole",
+ Operation: operation,
+ Storage: s,
+ Data: data,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: err: %#v\nresp: %#v", err, resp)
+ }
+ }
+
+ data := map[string]interface{}{
+ "bind_secret_id": true,
+ "secret_id_num_uses": 0,
+ "secret_id_ttl": "10m",
+ "token_policies": "policy",
+ "token_ttl": "5m",
+ "token_max_ttl": "10m",
+ "token_num_uses": 2,
+ "token_type": "default",
+ }
+ requestFunc(logical.CreateOperation, data)
+
+ data["token_num_uses"] = 0
+ data["token_type"] = "batch"
+ requestFunc(logical.UpdateOperation, data)
+
+ resp, err := b.HandleRequest(context.Background(), &logical.Request{
+ Path: "role/testrole/role-id",
+ Operation: logical.ReadOperation,
+ Storage: s,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr: %v", resp, err)
+ }
+ roleID := resp.Data["role_id"]
+
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Path: "role/testrole/secret-id",
+ Operation: logical.UpdateOperation,
+ Storage: s,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr: %v", resp, err)
+ }
+ secretID := resp.Data["secret_id"]
+
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Path: "login",
+ Operation: logical.UpdateOperation,
+ Data: map[string]interface{}{
+ "role_id": roleID,
+ "secret_id": secretID,
+ },
+ Storage: s,
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: resp: %#v\nerr: %v", resp, err)
+ }
+ require.NotNil(t, resp.Auth)
+}
+
func TestAppRole_RoleNameCaseSensitivity(t *testing.T) {
testFunc := func(t *testing.T, roleName string) {
var resp *logical.Response
diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go
index 394cef03b1c1d..be1f5065648e7 100644
--- a/builtin/credential/aws/cli.go
+++ b/builtin/credential/aws/cli.go
@@ -1,17 +1,9 @@
package awsauth
import (
- "encoding/base64"
- "encoding/json"
"fmt"
- "io/ioutil"
"strings"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/awsutil"
@@ -19,68 +11,6 @@ import (
type CLIHandler struct{}
-// STS is a really weird service that used to only have global endpoints but now has regional endpoints as well.
-// For backwards compatibility, even if you request a region other than us-east-1, it'll still sign for us-east-1.
-// See, e.g., https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code
-// So we have to shim in this EndpointResolver to force it to sign for the right region
-func stsSigningResolver(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
- defaultEndpoint, err := endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
- if err != nil {
- return defaultEndpoint, err
- }
- defaultEndpoint.SigningRegion = region
- return defaultEndpoint, nil
-}
-
-// GenerateLoginData populates the necessary data to send to the Vault server for generating a token
-// This is useful for other API clients to use
-func GenerateLoginData(creds *credentials.Credentials, headerValue, configuredRegion string) (map[string]interface{}, error) {
- loginData := make(map[string]interface{})
-
- // Use the credentials we've found to construct an STS session
- region, err := awsutil.GetRegion(configuredRegion)
- if err != nil {
- hclog.Default().Warn(fmt.Sprintf("defaulting region to %q due to %s", awsutil.DefaultRegion, err.Error()))
- region = awsutil.DefaultRegion
- }
- stsSession, err := session.NewSessionWithOptions(session.Options{
- Config: aws.Config{
- Credentials: creds,
- Region: &region,
- EndpointResolver: endpoints.ResolverFunc(stsSigningResolver),
- },
- })
- if err != nil {
- return nil, err
- }
-
- var params *sts.GetCallerIdentityInput
- svc := sts.New(stsSession)
- stsRequest, _ := svc.GetCallerIdentityRequest(params)
-
- // Inject the required auth header value, if supplied, and then sign the request including that header
- if headerValue != "" {
- stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
- }
- stsRequest.Sign()
-
- // Now extract out the relevant parts of the request
- headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
- if err != nil {
- return nil, err
- }
- requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
- if err != nil {
- return nil, err
- }
- loginData["iam_http_request_method"] = stsRequest.HTTPRequest.Method
- loginData["iam_request_url"] = base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
- loginData["iam_request_headers"] = base64.StdEncoding.EncodeToString(headersJson)
- loginData["iam_request_body"] = base64.StdEncoding.EncodeToString(requestBody)
-
- return loginData, nil
-}
-
func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
@@ -108,7 +38,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
hlogger := hclog.Default()
hlogger.SetLevel(level)
- creds, err := RetrieveCreds(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], hlogger)
+ creds, err := awsutil.RetrieveCreds(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], hlogger)
if err != nil {
return nil, err
}
@@ -117,7 +47,8 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
if region == "" {
region = awsutil.DefaultRegion
}
- loginData, err := GenerateLoginData(creds, headerValue, region)
+
+ loginData, err := awsutil.GenerateLoginData(creds, headerValue, region, hlogger)
if err != nil {
return nil, err
}
@@ -137,28 +68,6 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
return secret, nil
}
-func RetrieveCreds(accessKey, secretKey, sessionToken string, logger hclog.Logger) (*credentials.Credentials, error) {
- credConfig := &awsutil.CredentialsConfig{
- AccessKey: accessKey,
- SecretKey: secretKey,
- SessionToken: sessionToken,
- Logger: logger,
- }
- creds, err := credConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
- if creds == nil {
- return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
- }
-
- _, err = creds.Get()
- if err != nil {
- return nil, fmt.Errorf("failed to retrieve credentials from credential chain: %w", err)
- }
- return creds, nil
-}
-
func (h *CLIHandler) Help() string {
help := `
Usage: vault login -method=aws [CONFIG K=V...]
diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go
index 6aeaba6dcf68b..03c63f2e6a267 100644
--- a/builtin/credential/aws/path_login.go
+++ b/builtin/credential/aws/path_login.go
@@ -1364,7 +1364,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
if roleEntry.InferredEntityType == ec2EntityType {
instance, err := b.validateInstance(ctx, req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account)
if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", entity.SessionInfo, roleEntry.InferredAWSRegion)), nil
+ return logical.ErrorResponse("failed to verify %s as a valid EC2 instance in region %s: %s", entity.SessionInfo, roleEntry.InferredAWSRegion, err), nil
}
// build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements
diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go
index e98ac8e605a74..05c8730c353a4 100644
--- a/builtin/logical/aws/path_user.go
+++ b/builtin/logical/aws/path_user.go
@@ -32,6 +32,10 @@ func pathUser(b *backend) *framework.Path {
Description: "Lifetime of the returned credentials in seconds",
Default: 3600,
},
+ "role_session_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Session name to use when assuming role. Max chars: 64",
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -80,6 +84,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr
}
roleArn := d.Get("role_arn").(string)
+ roleSessionName := d.Get("role_session_name").(string)
var credentialType string
switch {
@@ -125,7 +130,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr
case !strutil.StrListContains(role.RoleArns, roleArn):
return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil
}
- return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl)
+ return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName)
case federationTokenCred:
return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl)
default:
diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go
index 6e6ee9a6a1725..327eafd9b367a 100644
--- a/builtin/logical/aws/secret_access_keys.go
+++ b/builtin/logical/aws/secret_access_keys.go
@@ -141,7 +141,7 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
displayName, roleName, roleArn, policy string, policyARNs []string,
- iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error) {
+ iamGroups []string, lifeTimeInSeconds int64, roleSessionName string) (*logical.Response, error) {
// grab any IAM group policies associated with the vault role, both inline
// and managed
@@ -165,10 +165,19 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
return logical.ErrorResponse(err.Error()), nil
}
- username, usernameWarning := genUsername(displayName, roleName, "iam_user")
+ roleSessionNameWarning := ""
+ if roleSessionName == "" {
+ roleSessionName, roleSessionNameWarning = genUsername(displayName, roleName, "iam_user")
+ } else {
+ roleSessionName = normalizeDisplayName(roleSessionName)
+ if len(roleSessionName) > 64 {
+ roleSessionName = roleSessionName[0:64]
+ roleSessionNameWarning = "the role session name was truncated to 64 characters to fit within IAM session name length limits"
+ }
+ }
assumeRoleInput := &sts.AssumeRoleInput{
- RoleSessionName: aws.String(username),
+ RoleSessionName: aws.String(roleSessionName),
RoleArn: aws.String(roleArn),
DurationSeconds: &lifeTimeInSeconds,
}
@@ -187,8 +196,9 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
"access_key": *tokenResp.Credentials.AccessKeyId,
"secret_key": *tokenResp.Credentials.SecretAccessKey,
"security_token": *tokenResp.Credentials.SessionToken,
+ "arn": *tokenResp.AssumedRoleUser.Arn,
}, map[string]interface{}{
- "username": username,
+ "username": roleSessionName,
"policy": roleArn,
"is_sts": true,
})
@@ -199,8 +209,8 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
// STS are purposefully short-lived and aren't renewable
resp.Secret.Renewable = false
- if usernameWarning != "" {
- resp.AddWarning(usernameWarning)
+ if roleSessionNameWarning != "" {
+ resp.AddWarning(roleSessionNameWarning)
}
return resp, nil
diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go
index 84ed3db8d3ba9..5774ea8634600 100644
--- a/builtin/logical/database/path_rotate_credentials.go
+++ b/builtin/logical/database/path_rotate_credentials.go
@@ -78,6 +78,14 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF
return nil, err
}
+ // Take out the backend lock since we are swapping out the connection
+ b.Lock()
+ defer b.Unlock()
+
+ // Take the write lock on the instance
+ dbi.Lock()
+ defer dbi.Unlock()
+
defer func() {
// Close the plugin
dbi.closed = true
@@ -88,14 +96,6 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF
delete(b.connections, name)
}()
- // Take out the backend lock since we are swapping out the connection
- b.Lock()
- defer b.Unlock()
-
- // Take the write lock on the instance
- dbi.Lock()
- defer dbi.Unlock()
-
// Generate new credentials
oldPassword := config.ConnectionDetails["password"].(string)
newPassword, err := dbi.database.GeneratePassword(ctx, b.System(), config.PasswordPolicy)
diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go
index ab261b87e16c8..c8221f3c4702a 100644
--- a/builtin/logical/database/rollback.go
+++ b/builtin/logical/database/rollback.go
@@ -4,6 +4,8 @@ import (
"context"
"errors"
+ "github.com/hashicorp/vault/sdk/database/dbplugin"
+
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
@@ -104,7 +106,7 @@ func (b *databaseBackend) rollbackDatabaseCredentials(ctx context.Context, confi
// It actually is the root user here, but we only want to use SetCredentials since
// RotateRootCredentials doesn't give any control over what password is used
_, err = dbi.database.UpdateUser(ctx, updateReq, false)
- if status.Code(err) == codes.Unimplemented {
+ if status.Code(err) == codes.Unimplemented || err == dbplugin.ErrPluginStaticUnsupported {
return nil
}
return err
diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go
index d1b5359717763..e625404e52d7d 100644
--- a/builtin/logical/database/version_wrapper.go
+++ b/builtin/logical/database/version_wrapper.go
@@ -152,7 +152,7 @@ func (d databaseVersionWrapper) changePasswordLegacy(ctx context.Context, userna
err = d.changeUserPasswordLegacy(ctx, username, passwordChange)
// If changing the root user's password but SetCredentials is unimplemented, fall back to RotateRootCredentials
- if isRootUser && status.Code(err) == codes.Unimplemented {
+ if isRootUser && (err == v4.ErrPluginStaticUnsupported || status.Code(err) == codes.Unimplemented) {
saveConfig, err = d.changeRootUserPasswordLegacy(ctx, passwordChange)
if err != nil {
return nil, err
diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go
index 56ec37d029035..2680de93c6a6d 100644
--- a/builtin/logical/database/version_wrapper_test.go
+++ b/builtin/logical/database/version_wrapper_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ v4 "github.com/hashicorp/vault/sdk/database/dbplugin"
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/mock"
@@ -672,7 +673,7 @@ func TestUpdateUser_legacyDB(t *testing.T) {
expectedConfig: nil,
expectErr: true,
},
- "change password - RotateRootCredentials": {
+ "change password - RotateRootCredentials (gRPC Unimplemented)": {
req: v5.UpdateUserRequest{
Username: "existing_user",
Password: &v5.ChangePassword{
@@ -696,6 +697,30 @@ func TestUpdateUser_legacyDB(t *testing.T) {
},
expectErr: false,
},
+ "change password - RotateRootCredentials (ErrPluginStaticUnsupported)": {
+ req: v5.UpdateUserRequest{
+ Username: "existing_user",
+ Password: &v5.ChangePassword{
+ NewPassword: "newpassowrd",
+ },
+ },
+ isRootUser: true,
+
+ setCredentialsErr: v4.ErrPluginStaticUnsupported,
+ setCredentialsCalls: 1,
+
+ rotateRootConfig: map[string]interface{}{
+ "foo": "bar",
+ },
+ rotateRootCalls: 1,
+
+ renewUserCalls: 0,
+
+ expectedConfig: map[string]interface{}{
+ "foo": "bar",
+ },
+ expectErr: false,
+ },
"change password - RotateRootCredentials failed": {
req: v5.UpdateUserRequest{
Username: "existing_user",
diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go
index baf60e05532da..b4253ba1ce0bf 100644
--- a/builtin/logical/ssh/backend_test.go
+++ b/builtin/logical/ssh/backend_test.go
@@ -19,8 +19,10 @@ import (
"golang.org/x/crypto/ssh"
+ "github.com/hashicorp/vault/builtin/credential/userpass"
"github.com/hashicorp/vault/helper/testhelpers/docker"
logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+ vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/mapstructure"
)
@@ -122,6 +124,7 @@ SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID
dockerImageTagSupportsRSA1 = "8.1_p1-r0-ls20"
dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48"
+
)
func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) {
@@ -158,7 +161,7 @@ func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), str
// Install util-linux for non-busybox flock that supports timeout option
err = testSSH("vaultssh", sshAddress, ssh.PublicKeys(signer), fmt.Sprintf(`
- set -e;
+ set -e;
sudo ln -s /config /home/vaultssh
sudo apk add util-linux;
echo "LogLevel DEBUG" | sudo tee -a /config/ssh_host_keys/sshd_config;
@@ -1318,6 +1321,252 @@ func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) {
logicaltest.Test(t, testCase)
}
+func TestBackend_DefExtTemplatingEnabled(t *testing.T) {
+ cluster, userpassToken := getSshCaTestCluster(t, testUserName)
+ defer cluster.Cleanup()
+ client := cluster.Cores[0].Client
+
+ // Get auth accessor for identity template.
+ auths, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ userpassAccessor := auths["userpass/"].Accessor
+
+ // Write SSH role.
+ _, err = client.Logical().Write("ssh/roles/test", map[string]interface{}{
+ "key_type": "ca",
+ "allowed_extensions": "login@zipzap.com",
+ "allow_user_certificates": true,
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "default_extensions_template": true,
+ "default_extensions": map[string]interface{}{
+ "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sshKeyID := "vault-userpass-" + testUserName + "-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0"
+
+ // Issue SSH certificate with default extensions templating enabled, and no user-provided extensions
+ client.SetToken(userpassToken)
+ resp, err := client.Logical().Write("ssh/sign/test", map[string]interface{}{
+ "public_key": publicKey4096,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ signedKey := resp.Data["signed_key"].(string)
+ key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+
+ parsedKey, err := ssh.ParsePublicKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defaultExtensionPermissions := map[string]string{
+ "login@foobar.com": testUserName,
+ }
+
+ err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Issue SSH certificate with default extensions templating enabled, and user-provided extensions
+ // The certificate should only have the user-provided extensions, and no templated extensions
+ userProvidedExtensionPermissions := map[string]string{
+ "login@zipzap.com": "some_other_user_name",
+ }
+ resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{
+ "public_key": publicKey4096,
+ "extensions": userProvidedExtensionPermissions,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ signedKey = resp.Data["signed_key"].(string)
+ key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+
+ parsedKey, err = ssh.ParsePublicKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedExtensionPermissions, 16*time.Hour)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Issue SSH certificate with default extensions templating enabled, and invalid user-provided extensions - it should fail
+ invalidUserProvidedExtensionPermissions := map[string]string{
+ "login@foobar.com": "{{identity.entity.metadata}}",
+ }
+ resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{
+ "public_key": publicKey4096,
+ "extensions": invalidUserProvidedExtensionPermissions,
+ })
+ if err == nil {
+ t.Fatal("expected an error while attempting to sign a key with invalid permissions")
+ }
+}
+
+func TestBackend_DefExtTemplatingDisabled(t *testing.T) {
+ cluster, userpassToken := getSshCaTestCluster(t, testUserName)
+ defer cluster.Cleanup()
+ client := cluster.Cores[0].Client
+
+ // Get auth accessor for identity template.
+ auths, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ userpassAccessor := auths["userpass/"].Accessor
+
+ // Write SSH role to test with any extension. We also provide a templated default extension,
+ // to verify that it's not actually being evaluated
+ _, err = client.Logical().Write("ssh/roles/test_allow_all_extensions", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_certificates": true,
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "default_extensions_template": false,
+ "default_extensions": map[string]interface{}{
+ "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sshKeyID := "vault-userpass-" + testUserName + "-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0"
+
+ // Issue SSH certificate with default extensions templating disabled, and no user-provided extensions
+ client.SetToken(userpassToken)
+ defaultExtensionPermissions := map[string]string{
+ "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}",
+ "login@zipzap.com": "some_other_user_name",
+ }
+ resp, err := client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{
+ "public_key": publicKey4096,
+ "extensions": defaultExtensionPermissions,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ signedKey := resp.Data["signed_key"].(string)
+ key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+
+ parsedKey, err := ssh.ParsePublicKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Issue SSH certificate with default extensions templating disabled, and user-provided extensions
+ client.SetToken(userpassToken)
+ userProvidedAnyExtensionPermissions := map[string]string{
+ "login@foobar.com": "not_userpassname",
+ "login@zipzap.com": "some_other_user_name",
+ }
+ resp, err = client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{
+ "public_key": publicKey4096,
+ "extensions": userProvidedAnyExtensionPermissions,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ signedKey = resp.Data["signed_key"].(string)
+ key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+
+ parsedKey, err = ssh.ParsePublicKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedAnyExtensionPermissions, 16*time.Hour)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func getSshCaTestCluster(t *testing.T, userIdentity string) (*vault.TestCluster, string) {
+ coreConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "userpass": userpass.Factory,
+ },
+ LogicalBackends: map[string]logical.Factory{
+ "ssh": Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ client := cluster.Cores[0].Client
+
+ // Write test policy for userpass auth method.
+ err := client.Sys().PutPolicy("test", `
+ path "ssh/*" {
+ capabilities = ["update"]
+ }`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Enable userpass auth method.
+ if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil {
+ t.Fatal(err)
+ }
+
+ // Configure test role for userpass.
+ if _, err := client.Logical().Write("auth/userpass/users/"+userIdentity, map[string]interface{}{
+ "password": "test",
+ "policies": "test",
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Login userpass for test role and keep client token.
+ secret, err := client.Logical().Write("auth/userpass/login/"+userIdentity, map[string]interface{}{
+ "password": "test",
+ })
+ if err != nil || secret == nil {
+ t.Fatal(err)
+ }
+ userpassToken := secret.Auth.ClientToken
+
+ // Mount SSH.
+ err = client.Sys().Mount("ssh", &api.MountInput{
+ Type: "ssh",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "16h",
+ MaxLeaseTTL: "60h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Configure SSH CA.
+ _, err = client.Logical().Write("ssh/config/ca", map[string]interface{}{
+ "public_key": testCAPublicKey,
+ "private_key": testCAPrivateKey,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return cluster, userpassToken
+}
+
func configCaStep(caPublicKey, caPrivateKey string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
@@ -1391,7 +1640,7 @@ func validateSSHCertificate(cert *ssh.Certificate, keyID string, certType int, v
actualTTL := time.Unix(int64(cert.ValidBefore), 0).Add(-30 * time.Second).Sub(time.Unix(int64(cert.ValidAfter), 0))
if actualTTL != ttl {
- return fmt.Errorf("incorrect ttl: expected: %v, actualL %v", ttl, actualTTL)
+ return fmt.Errorf("incorrect ttl: expected: %v, actual %v", ttl, actualTTL)
}
if !reflect.DeepEqual(cert.ValidPrincipals, validPrincipals) {
diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go
index 5a4f47cb3033a..0b1ef84ec6af8 100644
--- a/builtin/logical/ssh/path_roles.go
+++ b/builtin/logical/ssh/path_roles.go
@@ -26,33 +26,34 @@ const (
// for both OTP and Dynamic roles. Not all the fields are mandatory for both type.
// Some are applicable for one and not for other. It doesn't matter.
type sshRole struct {
- KeyType string `mapstructure:"key_type" json:"key_type"`
- KeyName string `mapstructure:"key" json:"key"`
- KeyBits int `mapstructure:"key_bits" json:"key_bits"`
- AdminUser string `mapstructure:"admin_user" json:"admin_user"`
- DefaultUser string `mapstructure:"default_user" json:"default_user"`
- CIDRList string `mapstructure:"cidr_list" json:"cidr_list"`
- ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"`
- Port int `mapstructure:"port" json:"port"`
- InstallScript string `mapstructure:"install_script" json:"install_script"`
- AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"`
- AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"`
- AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"`
- KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"`
- MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"`
- TTL string `mapstructure:"ttl" json:"ttl"`
- DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"`
- DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"`
- AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"`
- AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"`
- AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"`
- AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"`
- AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
- AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
- AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
- KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"`
- AllowedUserKeyLengths map[string]int `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths"`
- AlgorithmSigner string `mapstructure:"algorithm_signer" json:"algorithm_signer"`
+ KeyType string `mapstructure:"key_type" json:"key_type"`
+ KeyName string `mapstructure:"key" json:"key"`
+ KeyBits int `mapstructure:"key_bits" json:"key_bits"`
+ AdminUser string `mapstructure:"admin_user" json:"admin_user"`
+ DefaultUser string `mapstructure:"default_user" json:"default_user"`
+ CIDRList string `mapstructure:"cidr_list" json:"cidr_list"`
+ ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"`
+ Port int `mapstructure:"port" json:"port"`
+ InstallScript string `mapstructure:"install_script" json:"install_script"`
+ AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"`
+ AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"`
+ AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"`
+ KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"`
+ MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"`
+ TTL string `mapstructure:"ttl" json:"ttl"`
+ DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"`
+ DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"`
+ DefaultExtensionsTemplate bool `mapstructure:"default_extensions_template" json:"default_extensions_template"`
+ AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"`
+ AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"`
+ AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"`
+ AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"`
+ AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
+ AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
+ AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
+ KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"`
+ AllowedUserKeyLengths map[string]int `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths"`
+ AlgorithmSigner string `mapstructure:"algorithm_signer" json:"algorithm_signer"`
}
func pathListRoles(b *backend) *framework.Path {
@@ -267,6 +268,15 @@ func pathRoles(b *backend) *framework.Path {
"allowed_extensions". Defaults to none.
`,
},
+ "default_extensions_template": {
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If set, Default extension values can be specified using identity template policies.
+ Non-templated extension values are also permitted.
+ `,
+ Default: false,
+ },
"allow_user_certificates": {
Type: framework.TypeBool,
Description: `
@@ -334,7 +344,7 @@ func pathRoles(b *backend) *framework.Path {
"algorithm_signer": {
Type: framework.TypeString,
Description: `
- When supplied, this value specifies a signing algorithm for the key. Possible values:
+ When supplied, this value specifies a signing algorithm for the key. Possible values:
ssh-rsa, rsa-sha2-256, rsa-sha2-512.
`,
DisplayAttrs: &framework.DisplayAttributes{
@@ -514,20 +524,21 @@ func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *f
ttl := time.Duration(data.Get("ttl").(int)) * time.Second
maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
role := &sshRole{
- AllowedCriticalOptions: data.Get("allowed_critical_options").(string),
- AllowedExtensions: data.Get("allowed_extensions").(string),
- AllowUserCertificates: data.Get("allow_user_certificates").(bool),
- AllowHostCertificates: data.Get("allow_host_certificates").(bool),
- AllowedUsers: allowedUsers,
- AllowedUsersTemplate: data.Get("allowed_users_template").(bool),
- AllowedDomains: data.Get("allowed_domains").(string),
- DefaultUser: defaultUser,
- AllowBareDomains: data.Get("allow_bare_domains").(bool),
- AllowSubdomains: data.Get("allow_subdomains").(bool),
- AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
- KeyIDFormat: data.Get("key_id_format").(string),
- KeyType: KeyTypeCA,
- AlgorithmSigner: signer,
+ AllowedCriticalOptions: data.Get("allowed_critical_options").(string),
+ AllowedExtensions: data.Get("allowed_extensions").(string),
+ AllowUserCertificates: data.Get("allow_user_certificates").(bool),
+ AllowHostCertificates: data.Get("allow_host_certificates").(bool),
+ AllowedUsers: allowedUsers,
+ AllowedUsersTemplate: data.Get("allowed_users_template").(bool),
+ AllowedDomains: data.Get("allowed_domains").(string),
+ DefaultUser: defaultUser,
+ AllowBareDomains: data.Get("allow_bare_domains").(bool),
+ AllowSubdomains: data.Get("allow_subdomains").(bool),
+ AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
+ DefaultExtensionsTemplate: data.Get("default_extensions_template").(bool),
+ KeyIDFormat: data.Get("key_id_format").(string),
+ KeyType: KeyTypeCA,
+ AlgorithmSigner: signer,
}
if !role.AllowUserCertificates && !role.AllowHostCertificates {
@@ -600,26 +611,27 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) {
}
result = map[string]interface{}{
- "allowed_users": role.AllowedUsers,
- "allowed_users_template": role.AllowedUsersTemplate,
- "allowed_domains": role.AllowedDomains,
- "default_user": role.DefaultUser,
- "ttl": int64(ttl.Seconds()),
- "max_ttl": int64(maxTTL.Seconds()),
- "allowed_critical_options": role.AllowedCriticalOptions,
- "allowed_extensions": role.AllowedExtensions,
- "allow_user_certificates": role.AllowUserCertificates,
- "allow_host_certificates": role.AllowHostCertificates,
- "allow_bare_domains": role.AllowBareDomains,
- "allow_subdomains": role.AllowSubdomains,
- "allow_user_key_ids": role.AllowUserKeyIDs,
- "key_id_format": role.KeyIDFormat,
- "key_type": role.KeyType,
- "key_bits": role.KeyBits,
- "default_critical_options": role.DefaultCriticalOptions,
- "default_extensions": role.DefaultExtensions,
- "allowed_user_key_lengths": role.AllowedUserKeyLengths,
- "algorithm_signer": role.AlgorithmSigner,
+ "allowed_users": role.AllowedUsers,
+ "allowed_users_template": role.AllowedUsersTemplate,
+ "allowed_domains": role.AllowedDomains,
+ "default_user": role.DefaultUser,
+ "ttl": int64(ttl.Seconds()),
+ "max_ttl": int64(maxTTL.Seconds()),
+ "allowed_critical_options": role.AllowedCriticalOptions,
+ "allowed_extensions": role.AllowedExtensions,
+ "allow_user_certificates": role.AllowUserCertificates,
+ "allow_host_certificates": role.AllowHostCertificates,
+ "allow_bare_domains": role.AllowBareDomains,
+ "allow_subdomains": role.AllowSubdomains,
+ "allow_user_key_ids": role.AllowUserKeyIDs,
+ "key_id_format": role.KeyIDFormat,
+ "key_type": role.KeyType,
+ "key_bits": role.KeyBits,
+ "default_critical_options": role.DefaultCriticalOptions,
+ "default_extensions": role.DefaultExtensions,
+ "default_extensions_template": role.DefaultExtensionsTemplate,
+ "allowed_user_key_lengths": role.AllowedUserKeyLengths,
+ "algorithm_signer": role.AlgorithmSigner,
}
case KeyTypeDynamic:
result = map[string]interface{}{
diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go
index 8ab26f0c98204..acd7d2118bb38 100644
--- a/builtin/logical/ssh/path_sign.go
+++ b/builtin/logical/ssh/path_sign.go
@@ -155,7 +155,7 @@ func (b *backend) pathSignCertificate(ctx context.Context, req *logical.Request,
return logical.ErrorResponse(err.Error()), nil
}
- extensions, err := b.calculateExtensions(data, role)
+ extensions, err := b.calculateExtensions(data, req, role)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
@@ -356,27 +356,51 @@ func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshR
return criticalOptions, nil
}
-func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
+func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, error) {
unparsedExtensions := data.Get("extensions").(map[string]interface{})
- if len(unparsedExtensions) == 0 {
- return role.DefaultExtensions, nil
- }
+ extensions := make(map[string]string)
- extensions := convertMapToStringValue(unparsedExtensions)
+ if len(unparsedExtensions) > 0 {
+ extensions := convertMapToStringValue(unparsedExtensions)
+ if role.AllowedExtensions != "" {
+ notAllowed := []string{}
+ allowedExtensions := strings.Split(role.AllowedExtensions, ",")
- if role.AllowedExtensions != "" {
- notAllowed := []string{}
- allowedExtensions := strings.Split(role.AllowedExtensions, ",")
+ for extensionKey, _ := range extensions {
+ if !strutil.StrListContains(allowedExtensions, extensionKey) {
+ notAllowed = append(notAllowed, extensionKey)
+ }
+ }
- for extension := range extensions {
- if !strutil.StrListContains(allowedExtensions, extension) {
- notAllowed = append(notAllowed, extension)
+ if len(notAllowed) != 0 {
+ return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
}
}
+ return extensions, nil
+ }
- if len(notAllowed) != 0 {
- return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
+ if role.DefaultExtensionsTemplate {
+ for extensionKey, extensionValue := range role.DefaultExtensions {
+ // Look for templating markers {{ .* }}
+ matched, _ := regexp.MatchString(`^{{.+?}}$`, extensionValue)
+ if matched {
+ if req.EntityID != "" {
+ // Retrieve extension value based on template + entityID from request.
+ templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System())
+ if err == nil {
+ // Template returned an extension value that we can use
+ extensions[extensionKey] = templateExtensionValue
+ } else {
+ return nil, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err)
+ }
+ }
+ } else {
+ // Static extension value or err template
+ extensions[extensionKey] = extensionValue
+ }
}
+ } else {
+ extensions = role.DefaultExtensions
}
return extensions, nil
diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go
index e0168504bc233..321e920998943 100644
--- a/builtin/logical/transit/path_encrypt.go
+++ b/builtin/logical/transit/path_encrypt.go
@@ -3,6 +3,7 @@ package transit
import (
"context"
"encoding/base64"
+ "encoding/json"
"fmt"
"reflect"
@@ -193,6 +194,14 @@ func decodeBatchRequestItems(src interface{}, dst *[]BatchRequestItem) error {
if !reflect.ValueOf(v).IsValid() {
} else if casted, ok := v.(int); ok {
(*dst)[i].KeyVersion = casted
+ } else if js, ok := v.(json.Number); ok {
+ // https://github.com/hashicorp/vault/issues/10232
+ // Because API server parses json request with UseNumber=true, logical.Request.Data can include json.Number for a number field.
+ if casted, err := js.Int64(); err == nil {
+ (*dst)[i].KeyVersion = int(casted)
+ } else {
+ errs.Errors = append(errs.Errors, fmt.Sprintf(`error decoding %T into [%d].key_version: strconv.ParseInt: parsing "%s": invalid syntax`, v, i, v))
+ }
} else {
errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].key_version' expected type 'int', got unconvertible type '%T'", i, item["key_version"]))
}
diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go
index b81112f0e5d40..b6a772a0a6d93 100644
--- a/builtin/logical/transit/path_encrypt_test.go
+++ b/builtin/logical/transit/path_encrypt_test.go
@@ -2,6 +2,7 @@ package transit
import (
"context"
+ "encoding/json"
"reflect"
"testing"
@@ -634,6 +635,11 @@ func TestTransit_decodeBatchRequestItems(t *testing.T) {
src: []interface{}{map[string]interface{}{"key_version": "666"}},
dest: []BatchRequestItem{},
},
+ {
+ name: "src_key_version_invalid-number-dest",
+ src: []interface{}{map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "key_version": json.Number("1.1")}},
+ dest: []BatchRequestItem{},
+ },
{
name: "src_nonce-dest",
src: []interface{}{map[string]interface{}{"nonce": "dGVzdGNvbnRleHQ="}},
diff --git a/changelog/11288.txt b/changelog/11288.txt
new file mode 100644
index 0000000000000..6f0e95c8f53af
--- /dev/null
+++ b/changelog/11288.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+agent: Fixed agent templating to use configured TLS server name values
+```
diff --git a/changelog/11345.txt b/changelog/11345.txt
new file mode 100644
index 0000000000000..8ff694ff89f7e
--- /dev/null
+++ b/changelog/11345.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+secrets/aws: add ability to provide a role session name when generating STS credentials
+```
diff --git a/changelog/11495.txt b/changelog/11495.txt
new file mode 100644
index 0000000000000..d529872f3bdc9
--- /dev/null
+++ b/changelog/11495.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+ssh: add support for templated values in SSH CA DefaultExtensions
+```
\ No newline at end of file
diff --git a/changelog/11530.txt b/changelog/11530.txt
new file mode 100644
index 0000000000000..95bb8be7c20b3
--- /dev/null
+++ b/changelog/11530.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Redesign of KV 2 Delete toolbar.
+```
\ No newline at end of file
diff --git a/changelog/11576.txt b/changelog/11576.txt
new file mode 100644
index 0000000000000..0886ee983f652
--- /dev/null
+++ b/changelog/11576.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+agent/cert: Fix issue where the API client on agent was not honoring certificate
+information from the auto-auth config map on renewals or retries.
+```
\ No newline at end of file
diff --git a/changelog/11585.txt b/changelog/11585.txt
new file mode 100644
index 0000000000000..c983802a6a8aa
--- /dev/null
+++ b/changelog/11585.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented`
+```
diff --git a/changelog/11586.txt b/changelog/11586.txt
new file mode 100644
index 0000000000000..31c40692ca726
--- /dev/null
+++ b/changelog/11586.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Add regex validation to Transform Template pattern input
+```
diff --git a/changelog/11588.txt b/changelog/11588.txt
new file mode 100644
index 0000000000000..1f7f0365abfa5
--- /dev/null
+++ b/changelog/11588.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures.
+```
\ No newline at end of file
diff --git a/changelog/11596.txt b/changelog/11596.txt
new file mode 100644
index 0000000000000..3735ca0bf8584
--- /dev/null
+++ b/changelog/11596.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core (enterprise): Fix plugins mounted in namespaces being unable to use password policies
+```
diff --git a/changelog/11597.txt b/changelog/11597.txt
new file mode 100644
index 0000000000000..4a9c113d71ae6
--- /dev/null
+++ b/changelog/11597.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix text link URL on database roles list
+```
\ No newline at end of file
diff --git a/changelog/11600.txt b/changelog/11600.txt
new file mode 100644
index 0000000000000..f40d4bc4537ba
--- /dev/null
+++ b/changelog/11600.txt
@@ -0,0 +1,9 @@
+```release-note:improvement
+secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout`
+```
+```release-note:improvement
+secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB
+```
+```release-note:bug
+secrets/database: Fixed minor race condition when rotate-root is called
+```
diff --git a/changelog/11607.txt b/changelog/11607.txt
new file mode 100644
index 0000000000000..4404a23d981c2
--- /dev/null
+++ b/changelog/11607.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: add irrevocable lease list and count apis
+```
\ No newline at end of file
diff --git a/changelog/11628.txt b/changelog/11628.txt
new file mode 100644
index 0000000000000..335777e12cdec
--- /dev/null
+++ b/changelog/11628.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/transit: Fix bug where batch encryption did not work with key_version
+```
diff --git a/changelog/11638.txt b/changelog/11638.txt
new file mode 100644
index 0000000000000..5ed50652b150f
--- /dev/null
+++ b/changelog/11638.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+auth/aws: Underlying error included in validation failure message.
+```
diff --git a/changelog/11641.txt b/changelog/11641.txt
new file mode 100644
index 0000000000000..84bd31188beb3
--- /dev/null
+++ b/changelog/11641.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix entity group membership and metadata not showing
+```
diff --git a/changelog/11647.txt b/changelog/11647.txt
new file mode 100644
index 0000000000000..2075989ef7c31
--- /dev/null
+++ b/changelog/11647.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+tokenutil: Perform the num uses check before token type.
+```
diff --git a/changelog/11650.txt b/changelog/11650.txt
new file mode 100644
index 0000000000000..75029f94a090c
--- /dev/null
+++ b/changelog/11650.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: correct logic for renewal of leases nearing their expiration time.
+```
diff --git a/changelog/11672.txt b/changelog/11672.txt
new file mode 100644
index 0000000000000..2d019cec1018d
--- /dev/null
+++ b/changelog/11672.txt
@@ -0,0 +1,4 @@
+```release-note:improvement
+ui: Replace tool partials with components.
+```
+
diff --git a/changelog/11680.txt b/changelog/11680.txt
new file mode 100644
index 0000000000000..3e8b919b1c9b4
--- /dev/null
+++ b/changelog/11680.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Update partials to components
+```
\ No newline at end of file
diff --git a/changelog/11696.txt b/changelog/11696.txt
new file mode 100644
index 0000000000000..e3bc54c1de184
--- /dev/null
+++ b/changelog/11696.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`.
+```
diff --git a/changelog/11705.txt b/changelog/11705.txt
new file mode 100644
index 0000000000000..42d683d81f576
--- /dev/null
+++ b/changelog/11705.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Add specific error message if unseal fails due to license
+```
diff --git a/changelog/11708.txt b/changelog/11708.txt
new file mode 100644
index 0000000000000..21211915698bc
--- /dev/null
+++ b/changelog/11708.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: JSON fields on database can be cleared on edit
+```
diff --git a/changelog/11759.txt b/changelog/11759.txt
new file mode 100644
index 0000000000000..0b0776a65cbff
--- /dev/null
+++ b/changelog/11759.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: show site-wide banners for license warnings if applicable
+```
diff --git a/command/agent.go b/command/agent.go
index 576d9a40ed7ff..dbe7d81c82de1 100644
--- a/command/agent.go
+++ b/command/agent.go
@@ -17,7 +17,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/auth"
@@ -357,7 +356,7 @@ func (c *AgentCommand) Run(args []string) int {
}
s, err := file.NewFileSink(config)
if err != nil {
- c.UI.Error(errwrap.Wrapf("Error creating file sink: {{err}}", err).Error())
+ c.UI.Error(fmt.Errorf("Error creating file sink: %w", err).Error())
return 1
}
config.Sink = s
@@ -411,7 +410,7 @@ func (c *AgentCommand) Run(args []string) int {
return 1
}
if err != nil {
- c.UI.Error(errwrap.Wrapf(fmt.Sprintf("Error creating %s auth method: {{err}}", config.AutoAuth.Method.Type), err).Error())
+ c.UI.Error(fmt.Errorf("Error creating %s auth method: %w", config.AutoAuth.Method.Type, err).Error())
return 1
}
}
@@ -947,7 +946,7 @@ func (c *AgentCommand) storePidFile(pidPath string) error {
// Open the PID file
pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
- return errwrap.Wrapf("could not open pid file: {{err}}", err)
+ return fmt.Errorf("could not open pid file: %w", err)
}
defer pidFile.Close()
@@ -955,7 +954,7 @@ func (c *AgentCommand) storePidFile(pidPath string) error {
pid := os.Getpid()
_, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
if err != nil {
- return errwrap.Wrapf("could not write to pid file: {{err}}", err)
+ return fmt.Errorf("could not write to pid file: %w", err)
}
return nil
}
diff --git a/command/agent/auth/approle/approle.go b/command/agent/auth/approle/approle.go
index f9970348b7230..a76ba0b774993 100644
--- a/command/agent/auth/approle/approle.go
+++ b/command/agent/auth/approle/approle.go
@@ -9,7 +9,6 @@ import (
"os"
"strings"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/auth"
@@ -68,7 +67,7 @@ func NewApproleAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
if ok {
removeSecretIDFileAfterReading, err := parseutil.ParseBool(removeSecretIDFileAfterReadingRaw)
if err != nil {
- return nil, errwrap.Wrapf("error parsing 'remove_secret_id_file_after_reading' value: {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'remove_secret_id_file_after_reading' value: %w", err)
}
a.removeSecretIDFileAfterReading = removeSecretIDFileAfterReading
}
@@ -93,7 +92,7 @@ func (a *approleMethod) Authenticate(ctx context.Context, client *api.Client) (s
roleID, err := ioutil.ReadFile(a.roleIDFilePath)
if err != nil {
if a.cachedRoleID == "" {
- return "", nil, nil, errwrap.Wrapf("error reading role ID file and no cached role ID known: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error reading role ID file and no cached role ID known: %w", err)
}
a.logger.Warn("error reading role ID file", "error", err)
}
@@ -121,7 +120,7 @@ func (a *approleMethod) Authenticate(ctx context.Context, client *api.Client) (s
secretID, err := ioutil.ReadFile(a.secretIDFilePath)
if err != nil {
if a.cachedSecretID == "" {
- return "", nil, nil, errwrap.Wrapf("error reading secret ID file and no cached secret ID known: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error reading secret ID file and no cached secret ID known: %w", err)
}
a.logger.Warn("error reading secret ID file", "error", err)
}
@@ -135,13 +134,13 @@ func (a *approleMethod) Authenticate(ctx context.Context, client *api.Client) (s
if a.secretIDResponseWrappingPath != "" {
clonedClient, err := client.Clone()
if err != nil {
- return "", nil, nil, errwrap.Wrapf("error cloning client to unwrap secret ID: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error cloning client to unwrap secret ID: %w", err)
}
clonedClient.SetToken(stringSecretID)
// Validate the creation path
resp, err := clonedClient.Logical().Read("sys/wrapping/lookup")
if err != nil {
- return "", nil, nil, errwrap.Wrapf("error looking up wrapped secret ID: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error looking up wrapped secret ID: %w", err)
}
if resp == nil {
return "", nil, nil, errors.New("response nil when looking up wrapped secret ID")
@@ -164,7 +163,7 @@ func (a *approleMethod) Authenticate(ctx context.Context, client *api.Client) (s
// Now get the secret ID
resp, err = clonedClient.Logical().Unwrap("")
if err != nil {
- return "", nil, nil, errwrap.Wrapf("error unwrapping secret ID: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error unwrapping secret ID: %w", err)
}
if resp == nil {
return "", nil, nil, errors.New("response nil when unwrapping secret ID")
diff --git a/command/agent/auth/aws/aws.go b/command/agent/auth/aws/aws.go
index f13d10dffd311..56992b3ae0410 100644
--- a/command/agent/auth/aws/aws.go
+++ b/command/agent/auth/aws/aws.go
@@ -13,11 +13,9 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
- awsauth "github.com/hashicorp/vault/builtin/credential/aws"
"github.com/hashicorp/vault/command/agent/auth"
"github.com/hashicorp/vault/sdk/helper/awsutil"
)
@@ -166,7 +164,7 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
// Do an initial population of the creds because we want to err right away if we can't
// even get a first set.
- creds, err := awsauth.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
+ creds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
if err != nil {
return nil, err
}
@@ -184,7 +182,7 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
data := make(map[string]interface{})
sess, err := session.NewSession()
if err != nil {
- retErr = errwrap.Wrapf("error creating session: {{err}}", err)
+ retErr = fmt.Errorf("error creating session: %w", err)
return
}
metadataSvc := ec2metadata.New(sess)
@@ -195,7 +193,7 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
{
doc, err := metadataSvc.GetDynamicData("/instance-identity/document")
if err != nil {
- retErr = errwrap.Wrapf("error requesting doc: {{err}}", err)
+ retErr = fmt.Errorf("error requesting doc: %w", err)
return
}
data["identity"] = base64.StdEncoding.EncodeToString([]byte(doc))
@@ -205,7 +203,7 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
{
signature, err := metadataSvc.GetDynamicData("/instance-identity/signature")
if err != nil {
- retErr = errwrap.Wrapf("error requesting signature: {{err}}", err)
+ retErr = fmt.Errorf("error requesting signature: %w", err)
return
}
data["signature"] = signature
@@ -215,7 +213,7 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
if a.nonce == "" {
uid, err := uuid.GenerateUUID()
if err != nil {
- retErr = errwrap.Wrapf("error generating uuid for reauthentication value: {{err}}", err)
+ retErr = fmt.Errorf("error generating uuid for reauthentication value: %w", err)
return
}
a.nonce = uid
@@ -228,9 +226,9 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
defer a.credLock.Unlock()
var err error
- data, err = awsauth.GenerateLoginData(a.lastCreds, a.headerValue, a.region)
+ data, err = awsutil.GenerateLoginData(a.lastCreds, a.headerValue, a.region, a.logger)
if err != nil {
- retErr = errwrap.Wrapf("error creating login value: {{err}}", err)
+ retErr = fmt.Errorf("error creating login value: %w", err)
return
}
}
@@ -272,7 +270,7 @@ func (a *awsMethod) checkCreds(accessKey, secretKey, sessionToken string) error
defer a.credLock.Unlock()
a.logger.Trace("checking for new credentials")
- currentCreds, err := awsauth.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
+ currentCreds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
if err != nil {
return err
}
diff --git a/command/agent/auth/azure/azure.go b/command/agent/auth/azure/azure.go
index 4b2f7274a3704..bc01e561f38dc 100644
--- a/command/agent/auth/azure/azure.go
+++ b/command/agent/auth/azure/azure.go
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"net/http"
- "github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
@@ -95,7 +94,7 @@ func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (ret
err = jsonutil.DecodeJSON(body, &instance)
if err != nil {
- retErr = errwrap.Wrapf("error parsing instance metadata response: {{err}}", err)
+ retErr = fmt.Errorf("error parsing instance metadata response: %w", err)
return
}
@@ -112,7 +111,7 @@ func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (ret
err = jsonutil.DecodeJSON(body, &identity)
if err != nil {
- retErr = errwrap.Wrapf("error parsing identity metadata response: {{err}}", err)
+ retErr = fmt.Errorf("error parsing identity metadata response: %w", err)
return
}
@@ -158,7 +157,7 @@ func getMetadataInfo(ctx context.Context, endpoint, resource string) ([]byte, er
client := cleanhttp.DefaultClient()
resp, err := client.Do(req)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error fetching metadata from %s: {{err}}", endpoint), err)
+ return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err)
}
if resp == nil {
@@ -168,7 +167,7 @@ func getMetadataInfo(ctx context.Context, endpoint, resource string) ([]byte, er
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error reading metadata from %s: {{err}}", endpoint), err)
+ return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err)
}
if resp.StatusCode != http.StatusOK {
diff --git a/command/agent/auth/cert/cert.go b/command/agent/auth/cert/cert.go
index 297b71da05008..3c8162a5eacda 100644
--- a/command/agent/auth/cert/cert.go
+++ b/command/agent/auth/cert/cert.go
@@ -108,7 +108,7 @@ func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) {
if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") {
// Return cached client if present
if c.client != nil {
- return client, nil
+ return c.client, nil
}
config := api.DefaultConfig()
diff --git a/command/agent/auth/cert/cert_test.go b/command/agent/auth/cert/cert_test.go
new file mode 100644
index 0000000000000..9f1378c280561
--- /dev/null
+++ b/command/agent/auth/cert/cert_test.go
@@ -0,0 +1,133 @@
+package cert
+
+import (
+ "context"
+ "os"
+ "path"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/command/agent/auth"
+)
+
+func TestCertAuthMethod_Authenticate(t *testing.T) {
+ config := &auth.AuthConfig{
+ Logger: hclog.NewNullLogger(),
+ MountPath: "cert-test",
+ Config: map[string]interface{}{
+ "name": "foo",
+ },
+ }
+
+ method, err := NewCertAuthMethod(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client, err := api.NewClient(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loginPath, _, authMap, err := method.Authenticate(context.Background(), client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedLoginPath := path.Join(config.MountPath, "/login")
+ if loginPath != expectedLoginPath {
+ t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath)
+ }
+
+ expectedAuthMap := map[string]interface{}{
+ "name": config.Config["name"],
+ }
+ if !reflect.DeepEqual(authMap, expectedAuthMap) {
+ t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap)
+ }
+}
+
+func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) {
+ config := &auth.AuthConfig{
+ Logger: hclog.NewNullLogger(),
+ MountPath: "cert-test",
+ Config: map[string]interface{}{
+ "name": "without-certs",
+ },
+ }
+
+ method, err := NewCertAuthMethod(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client, err := api.NewClient(api.DefaultConfig())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if client != clientToUse {
+ t.Fatal("error: expected AuthClient to return back original client")
+ }
+}
+
+func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) {
+
+ clientCert, err := os.Open("./test-fixtures/keys/cert.pem")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer clientCert.Close()
+
+ clientKey, err := os.Open("./test-fixtures/keys/key.pem")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer clientKey.Close()
+
+ config := &auth.AuthConfig{
+ Logger: hclog.NewNullLogger(),
+ MountPath: "cert-test",
+ Config: map[string]interface{}{
+ "name": "with-certs",
+ "client_cert": clientCert.Name(),
+ "client_key": clientKey.Name(),
+ },
+ }
+
+ method, err := NewCertAuthMethod(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client, err := api.NewClient(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if client == clientToUse {
+ t.Fatal("expected client from AuthClient to be different from original client")
+ }
+
+ // Call AuthClient again to get back the cached client
+ cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if cachedClient != clientToUse {
+ t.Fatal("expected client from AuthClient to return back a cached client")
+ }
+}
diff --git a/command/agent/auth/cert/test-fixtures/keys/cert.pem b/command/agent/auth/cert/test-fixtures/keys/cert.pem
new file mode 100644
index 0000000000000..67ef67dd8d718
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/keys/cert.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/command/agent/auth/cert/test-fixtures/keys/key.pem b/command/agent/auth/cert/test-fixtures/keys/key.pem
new file mode 100644
index 0000000000000..add982002acf7
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/keys/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
diff --git a/command/agent/auth/cert/test-fixtures/keys/pkioutput b/command/agent/auth/cert/test-fixtures/keys/pkioutput
new file mode 100644
index 0000000000000..526ff03167b2d
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/keys/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
+lease_duration 279359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
+MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
+TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
+SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
+YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
+donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
+B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
+MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
+HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
+k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
+OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
+AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
+aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
+X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
+aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
+KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
+xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
+-----END CERTIFICATE-----
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
+HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
+6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
+y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
+DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
+9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
+RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
+rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
+5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
+oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
+GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
+akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
+FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
+efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
+r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
+0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
+FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
+kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
+UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
+injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
+2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
+gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
diff --git a/command/agent/auth/cert/test-fixtures/root/pkioutput b/command/agent/auth/cert/test-fixtures/root/pkioutput
new file mode 100644
index 0000000000000..312ae18deae8f
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/root/pkioutput
@@ -0,0 +1,74 @@
+Key Value
+lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586
+lease_duration 315359999
+lease_renewable false
+certificate -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+expiration 1.772072879e+09
+issuing_ca -----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
+private_key -----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
+private_key_type rsa
+serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d
diff --git a/command/agent/auth/cert/test-fixtures/root/root.crl b/command/agent/auth/cert/test-fixtures/root/root.crl
new file mode 100644
index 0000000000000..a80c9e4117cb7
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/root/root.crl
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN
+MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3
+UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H
+MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn
+HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk
+RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J
+xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y
+UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg
+1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw
+IA0=
+-----END X509 CRL-----
diff --git a/command/agent/auth/cert/test-fixtures/root/rootcacert.pem b/command/agent/auth/cert/test-fixtures/root/rootcacert.pem
new file mode 100644
index 0000000000000..dcb307a140115
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/root/rootcacert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
+MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
+Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
+AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
+6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
+SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
+7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
+BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
+ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
+t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
+zehNe5dFTjFpylg1o6b8Ow==
+-----END CERTIFICATE-----
diff --git a/command/agent/auth/cert/test-fixtures/root/rootcakey.pem b/command/agent/auth/cert/test-fixtures/root/rootcakey.pem
new file mode 100644
index 0000000000000..e950da5ba3040
--- /dev/null
+++ b/command/agent/auth/cert/test-fixtures/root/rootcakey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
+t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
+BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
+/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
+0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
+18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
+ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
+8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
+nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
+2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
+grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
+0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
+lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
+AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
+ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
+thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
+4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
+iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
+tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
+LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
+OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
+-----END RSA PRIVATE KEY-----
diff --git a/command/agent/auth/gcp/gcp.go b/command/agent/auth/gcp/gcp.go
index 4d36b05e19e7b..3c8053f1b27ff 100644
--- a/command/agent/auth/gcp/gcp.go
+++ b/command/agent/auth/gcp/gcp.go
@@ -9,7 +9,6 @@ import (
"net/http"
"time"
- "github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-gcp-common/gcputil"
hclog "github.com/hashicorp/go-hclog"
@@ -109,7 +108,7 @@ func NewGCPAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
if ok {
g.jwtExp, err = parseutil.ParseInt(jwtExpRaw)
if err != nil {
- return nil, errwrap.Wrapf("error parsing 'jwt_raw' into integer: {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'jwt_raw' into integer: %w", err)
}
}
@@ -130,7 +129,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
{
req, err := http.NewRequest("GET", fmt.Sprintf(identityEndpoint, g.serviceAccount), nil)
if err != nil {
- retErr = errwrap.Wrapf("error creating request: {{err}}", err)
+ retErr = fmt.Errorf("error creating request: %w", err)
return
}
req = req.WithContext(ctx)
@@ -141,7 +140,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
req.URL.RawQuery = q.Encode()
resp, err := httpClient.Do(req)
if err != nil {
- retErr = errwrap.Wrapf("error fetching instance token: {{err}}", err)
+ retErr = fmt.Errorf("error fetching instance token: %w", err)
return
}
if resp == nil {
@@ -151,7 +150,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
defer resp.Body.Close()
jwtBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- retErr = errwrap.Wrapf("error reading instance token response body: {{err}}", err)
+ retErr = fmt.Errorf("error reading instance token response body: %w", err)
return
}
@@ -163,7 +162,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
credentials, tokenSource, err := gcputil.FindCredentials(g.credentials, ctx, iamcredentials.CloudPlatformScope)
if err != nil {
- retErr = errwrap.Wrapf("could not obtain credentials: {{err}}", err)
+ retErr = fmt.Errorf("could not obtain credentials: %w", err)
return
}
@@ -193,7 +192,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
}
payloadBytes, err := json.Marshal(jwtPayload)
if err != nil {
- retErr = errwrap.Wrapf("could not convert JWT payload to JSON string: {{err}}", err)
+ retErr = fmt.Errorf("could not convert JWT payload to JSON string: %w", err)
return
}
@@ -203,14 +202,14 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa
iamClient, err := iamcredentials.New(httpClient)
if err != nil {
- retErr = errwrap.Wrapf("could not create IAM client: {{err}}", err)
+ retErr = fmt.Errorf("could not create IAM client: %w", err)
return
}
resourceName := fmt.Sprintf("projects/-/serviceAccounts/%s", serviceAccount)
resp, err := iamClient.Projects.ServiceAccounts.SignJwt(resourceName, jwtReq).Do()
if err != nil {
- retErr = errwrap.Wrapf(fmt.Sprintf("unable to sign JWT for %s using given Vault credentials: {{err}}", resourceName), err)
+ retErr = fmt.Errorf("unable to sign JWT for %s using given Vault credentials: %w", resourceName, err)
return
}
diff --git a/command/agent/auth/kubernetes/kubernetes.go b/command/agent/auth/kubernetes/kubernetes.go
index 8b35b30ae6c7c..c30f3cb5a68b1 100644
--- a/command/agent/auth/kubernetes/kubernetes.go
+++ b/command/agent/auth/kubernetes/kubernetes.go
@@ -10,7 +10,6 @@ import (
"os"
"strings"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/auth"
@@ -78,7 +77,7 @@ func (k *kubernetesMethod) Authenticate(ctx context.Context, client *api.Client)
jwtString, err := k.readJWT()
if err != nil {
- return "", nil, nil, errwrap.Wrapf("error reading JWT with Kubernetes Auth: {{err}}", err)
+ return "", nil, nil, fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err)
}
return fmt.Sprintf("%s/login", k.mountPath), nil, map[string]interface{}{
diff --git a/command/agent/cache/handler.go b/command/agent/cache/handler.go
index a63d32a79c493..73062df41fbd0 100644
--- a/command/agent/cache/handler.go
+++ b/command/agent/cache/handler.go
@@ -12,7 +12,6 @@ import (
"net/http"
"time"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/sink"
@@ -60,14 +59,14 @@ func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSin
w.WriteHeader(resp.Response.StatusCode)
io.Copy(w, resp.Response.Body)
} else {
- logical.RespondError(w, http.StatusInternalServerError, errwrap.Wrapf("failed to get the response: {{err}}", err))
+ logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err))
}
return
}
err = processTokenLookupResponse(ctx, logger, inmemSink, req, resp)
if err != nil {
- logical.RespondError(w, http.StatusInternalServerError, errwrap.Wrapf("failed to process token lookup response: {{err}}", err))
+ logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to process token lookup response: %w", err))
return
}
diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go
index 0a68692134022..572341864882d 100644
--- a/command/agent/cache/lease_cache.go
+++ b/command/agent/cache/lease_cache.go
@@ -16,7 +16,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/cache/cacheboltdb"
@@ -577,7 +576,7 @@ func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
if err == io.EOF {
err = errors.New("empty JSON provided")
}
- logical.RespondError(w, http.StatusBadRequest, errwrap.Wrapf("failed to parse JSON input: {{err}}", err))
+ logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err))
return
}
@@ -586,7 +585,7 @@ func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
in, err := parseCacheClearInput(req)
if err != nil {
c.logger.Error("unable to parse clear input", "error", err)
- logical.RespondError(w, http.StatusBadRequest, errwrap.Wrapf("failed to parse clear input: {{err}}", err))
+ logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err))
return
}
@@ -597,7 +596,7 @@ func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
if err == errInvalidType {
httpStatus = http.StatusBadRequest
}
- logical.RespondError(w, httpStatus, errwrap.Wrapf("failed to clear cache: {{err}}", err))
+ logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err))
return
}
diff --git a/command/agent/config/config.go b/command/agent/config/config.go
index 618e172a51e11..aa0647bd663db 100644
--- a/command/agent/config/config.go
+++ b/command/agent/config/config.go
@@ -9,7 +9,6 @@ import (
"time"
ctconfig "github.com/hashicorp/consul-template/config"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
@@ -171,15 +170,15 @@ func LoadConfig(path string) (*Config, error) {
}
if err := parseAutoAuth(result, list); err != nil {
- return nil, errwrap.Wrapf("error parsing 'auto_auth': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'auto_auth': %w", err)
}
if err := parseCache(result, list); err != nil {
- return nil, errwrap.Wrapf("error parsing 'cache':{{err}}", err)
+ return nil, fmt.Errorf("error parsing 'cache':%w", err)
}
if err := parseTemplates(result, list); err != nil {
- return nil, errwrap.Wrapf("error parsing 'template': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'template': %w", err)
}
if result.Cache != nil {
@@ -207,7 +206,7 @@ func LoadConfig(path string) (*Config, error) {
err = parseVault(result, list)
if err != nil {
- return nil, errwrap.Wrapf("error parsing 'vault':{{err}}", err)
+ return nil, fmt.Errorf("error parsing 'vault':%w", err)
}
if result.Vault == nil {
@@ -263,7 +262,7 @@ func parseVault(result *Config, list *ast.ObjectList) error {
}
if err := parseRetry(result, subs.List); err != nil {
- return errwrap.Wrapf("error parsing 'retry': {{err}}", err)
+ return fmt.Errorf("error parsing 'retry': %w", err)
}
return nil
@@ -409,14 +408,14 @@ func parseAutoAuth(result *Config, list *ast.ObjectList) error {
subList := subs.List
if err := parseMethod(result, subList); err != nil {
- return errwrap.Wrapf("error parsing 'method': {{err}}", err)
+ return fmt.Errorf("error parsing 'method': %w", err)
}
if a.Method == nil {
return fmt.Errorf("no 'method' block found")
}
if err := parseSinks(result, subList); err != nil {
- return errwrap.Wrapf("error parsing 'sink' stanzas: {{err}}", err)
+ return fmt.Errorf("error parsing 'sink' stanzas: %w", err)
}
if result.AutoAuth.Method.WrapTTL > 0 {
diff --git a/command/agent/sink/file/file_sink.go b/command/agent/sink/file/file_sink.go
index 0437aae981dfe..f2faf5641797d 100644
--- a/command/agent/sink/file/file_sink.go
+++ b/command/agent/sink/file/file_sink.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"strings"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/command/agent/sink"
@@ -60,7 +59,7 @@ func NewFileSink(conf *sink.SinkConfig) (sink.Sink, error) {
}
if err := f.WriteToken(""); err != nil {
- return nil, errwrap.Wrapf("error during write check: {{err}}", err)
+ return nil, fmt.Errorf("error during write check: %w", err)
}
f.logger.Info("file sink configured", "path", f.path, "mode", f.mode)
@@ -79,7 +78,7 @@ func (f *fileSink) WriteToken(token string) error {
u, err := uuid.GenerateUUID()
if err != nil {
- return errwrap.Wrapf("error generating a uuid during write check: {{err}}", err)
+ return fmt.Errorf("error generating a uuid during write check: %w", err)
}
targetDir := filepath.Dir(f.path)
@@ -88,7 +87,7 @@ func (f *fileSink) WriteToken(token string) error {
tmpFile, err := os.OpenFile(filepath.Join(targetDir, fmt.Sprintf("%s.tmp.%s", fileName, tmpSuffix)), os.O_WRONLY|os.O_CREATE, f.mode)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error opening temp file in dir %s for writing: {{err}}", targetDir), err)
+ return fmt.Errorf("error opening temp file in dir %s for writing: %w", targetDir, err)
}
valToWrite := token
@@ -101,12 +100,12 @@ func (f *fileSink) WriteToken(token string) error {
// Attempt closing and deleting but ignore any error
tmpFile.Close()
os.Remove(tmpFile.Name())
- return errwrap.Wrapf(fmt.Sprintf("error writing to %s: {{err}}", tmpFile.Name()), err)
+ return fmt.Errorf("error writing to %s: %w", tmpFile.Name(), err)
}
err = tmpFile.Close()
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error closing %s: {{err}}", tmpFile.Name()), err)
+ return fmt.Errorf("error closing %s: %w", tmpFile.Name(), err)
}
// Now, if we were just doing a write check (blank token), remove the file
@@ -114,14 +113,14 @@ func (f *fileSink) WriteToken(token string) error {
if token == "" {
err = os.Remove(tmpFile.Name())
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error removing temp file %s during write check: {{err}}", tmpFile.Name()), err)
+ return fmt.Errorf("error removing temp file %s during write check: %w", tmpFile.Name(), err)
}
return nil
}
err = os.Rename(tmpFile.Name(), f.path)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error renaming temp file %s to target file %s: {{err}}", tmpFile.Name(), f.path), err)
+ return fmt.Errorf("error renaming temp file %s to target file %s: %w", tmpFile.Name(), f.path, err)
}
f.logger.Info("token written", "path", f.path)
diff --git a/command/agent/sink/sink.go b/command/agent/sink/sink.go
index bbbc8edf94903..853cc9345f5ef 100644
--- a/command/agent/sink/sink.go
+++ b/command/agent/sink/sink.go
@@ -3,13 +3,13 @@ package sink
import (
"context"
"errors"
+ "fmt"
"io/ioutil"
"math/rand"
"os"
"sync/atomic"
"time"
- "github.com/hashicorp/errwrap"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/dhutil"
@@ -177,17 +177,17 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
_, err = os.Lstat(s.DHPath)
if err != nil {
if !os.IsNotExist(err) {
- return "", errwrap.Wrapf("error stat-ing dh parameters file: {{err}}", err)
+ return "", fmt.Errorf("error stat-ing dh parameters file: %w", err)
}
return "", errors.New("no dh parameters file found, and no cached pub key")
}
fileBytes, err := ioutil.ReadFile(s.DHPath)
if err != nil {
- return "", errwrap.Wrapf("error reading file for dh parameters: {{err}}", err)
+ return "", fmt.Errorf("error reading file for dh parameters: %w", err)
}
theirPubKey := new(dhutil.PublicKeyInfo)
if err := jsonutil.DecodeJSON(fileBytes, theirPubKey); err != nil {
- return "", errwrap.Wrapf("error decoding public key: {{err}}", err)
+ return "", fmt.Errorf("error decoding public key: %w", err)
}
if len(theirPubKey.Curve25519PublicKey) == 0 {
return "", errors.New("public key is nil")
@@ -197,7 +197,7 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
if len(s.cachedPubKey) == 0 {
s.cachedPubKey, s.cachedPriKey, err = dhutil.GeneratePublicPrivateKey()
if err != nil {
- return "", errwrap.Wrapf("error generating pub/pri curve25519 keys: {{err}}", err)
+ return "", fmt.Errorf("error generating pub/pri curve25519 keys: %w", err)
}
}
resp.Curve25519PublicKey = s.cachedPubKey
@@ -205,7 +205,7 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
secret, err := dhutil.GenerateSharedSecret(s.cachedPriKey, s.cachedRemotePubKey)
if err != nil {
- return "", errwrap.Wrapf("error calculating shared key: {{err}}", err)
+ return "", fmt.Errorf("error calculating shared key: %w", err)
}
if s.DeriveKey {
aesKey, err = dhutil.DeriveSharedKey(secret, s.cachedPubKey, s.cachedRemotePubKey)
@@ -214,7 +214,7 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
}
if err != nil {
- return "", errwrap.Wrapf("error deriving shared key: {{err}}", err)
+ return "", fmt.Errorf("error deriving shared key: %w", err)
}
if len(aesKey) == 0 {
return "", errors.New("derived AES key is empty")
@@ -222,11 +222,11 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
resp.EncryptedPayload, resp.Nonce, err = dhutil.EncryptAES(aesKey, []byte(token), []byte(s.AAD))
if err != nil {
- return "", errwrap.Wrapf("error encrypting with shared key: {{err}}", err)
+ return "", fmt.Errorf("error encrypting with shared key: %w", err)
}
m, err := jsonutil.EncodeJSON(resp)
if err != nil {
- return "", errwrap.Wrapf("error encoding encrypted payload: {{err}}", err)
+ return "", fmt.Errorf("error encoding encrypted payload: %w", err)
}
return string(m), nil
@@ -235,7 +235,7 @@ func (s *SinkConfig) encryptToken(token string) (string, error) {
func (s *SinkConfig) wrapToken(client *api.Client, wrapTTL time.Duration, token string) (string, error) {
wrapClient, err := client.Clone()
if err != nil {
- return "", errwrap.Wrapf("error deriving client for wrapping, not writing out to sink: {{err}})", err)
+ return "", fmt.Errorf("error deriving client for wrapping, not writing out to sink: %w", err)
}
wrapClient.SetToken(token)
wrapClient.SetWrappingLookupFunc(func(string, string) string {
@@ -245,7 +245,7 @@ func (s *SinkConfig) wrapToken(client *api.Client, wrapTTL time.Duration, token
"token": token,
})
if err != nil {
- return "", errwrap.Wrapf("error wrapping token, not writing out to sink: {{err}})", err)
+ return "", fmt.Errorf("error wrapping token, not writing out to sink: %w", err)
}
if secret == nil {
return "", errors.New("nil secret returned, not writing out to sink")
@@ -256,7 +256,7 @@ func (s *SinkConfig) wrapToken(client *api.Client, wrapTTL time.Duration, token
m, err := jsonutil.EncodeJSON(secret.WrapInfo)
if err != nil {
- return "", errwrap.Wrapf("error marshaling token, not writing out to sink: {{err}})", err)
+ return "", fmt.Errorf("error marshaling token, not writing out to sink: %w", err)
}
return string(m), nil
diff --git a/command/agent/template/template.go b/command/agent/template/template.go
index 6591e9a189200..9396d1f82116d 100644
--- a/command/agent/template/template.go
+++ b/command/agent/template/template.go
@@ -274,12 +274,13 @@ func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctc
skipVerify := sc.AgentConfig.Vault.TLSSkipVerify
verify := !skipVerify
conf.Vault.SSL = &ctconfig.SSLConfig{
- Enabled: pointerutil.BoolPtr(true),
- Verify: &verify,
- Cert: &sc.AgentConfig.Vault.ClientCert,
- Key: &sc.AgentConfig.Vault.ClientKey,
- CaCert: &sc.AgentConfig.Vault.CACert,
- CaPath: &sc.AgentConfig.Vault.CAPath,
+ Enabled: pointerutil.BoolPtr(true),
+ Verify: &verify,
+ Cert: &sc.AgentConfig.Vault.ClientCert,
+ Key: &sc.AgentConfig.Vault.ClientKey,
+ CaCert: &sc.AgentConfig.Vault.CACert,
+ CaPath: &sc.AgentConfig.Vault.CAPath,
+ ServerName: &sc.AgentConfig.Vault.TLSServerName,
}
}
enabled := attempts > 0
diff --git a/command/base_helpers.go b/command/base_helpers.go
index 1d6b3ce545e58..5a6339d0969ad 100644
--- a/command/base_helpers.go
+++ b/command/base_helpers.go
@@ -8,7 +8,6 @@ import (
"strings"
"time"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/api"
kvbuilder "github.com/hashicorp/vault/internalshared/kv-builder"
"github.com/kr/text"
@@ -285,7 +284,7 @@ func parseFlagFile(raw string) (string, error) {
if len(raw) > 0 && raw[0] == '@' {
contents, err := ioutil.ReadFile(raw[1:])
if err != nil {
- return "", errwrap.Wrapf("error reading file: {{err}}", err)
+ return "", fmt.Errorf("error reading file: %w", err)
}
return string(contents), nil
diff --git a/command/config/config.go b/command/config/config.go
index 4a9916e13149b..ef0c4adf6dcd8 100644
--- a/command/config/config.go
+++ b/command/config/config.go
@@ -5,7 +5,6 @@ import (
"io/ioutil"
"os"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/sdk/helper/hclutil"
@@ -57,7 +56,7 @@ func LoadConfig(path string) (*DefaultConfig, error) {
// NOTE: requires HOME env var to be set
path, err := homedir.Expand(path)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error expanding config path %q: {{err}}", path), err)
+ return nil, fmt.Errorf("error expanding config path %q: %w", path, err)
}
contents, err := ioutil.ReadFile(path)
@@ -67,7 +66,7 @@ func LoadConfig(path string) (*DefaultConfig, error) {
conf, err := ParseConfig(string(contents))
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error parsing config file at %q: {{err}}; ensure that the file is valid; Ansible Vault is known to conflict with it.", path), err)
+ return nil, fmt.Errorf("error parsing config file at %q: %w; ensure that the file is valid; Ansible Vault is known to conflict with it.", path, err)
}
return conf, nil
diff --git a/command/license_get.go b/command/license_get.go
index 147c751964942..ffda9ec52b085 100644
--- a/command/license_get.go
+++ b/command/license_get.go
@@ -2,7 +2,6 @@ package command
import (
"fmt"
- "strconv"
"strings"
"github.com/mitchellh/cli"
@@ -82,7 +81,14 @@ func (c *LicenseGetCommand) Run(args []string) int {
return 2
}
- secret, err := client.Logical().ReadWithData("sys/license", map[string][]string{"signed": {strconv.FormatBool(c.signed)}})
+ var path string
+ if c.signed {
+ path = "sys/license/signed"
+ } else {
+ path = "sys/license"
+ }
+
+ secret, err := client.Logical().Read(path)
if err != nil {
c.UI.Error(fmt.Sprintf("Error retrieving license: %s", err))
return 2
diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go
index 8b8511c4dffb2..f34830f729e72 100644
--- a/command/operator_diagnose.go
+++ b/command/operator_diagnose.go
@@ -2,16 +2,28 @@ package command
import (
"context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
"strings"
"sync"
+ "time"
+ "github.com/docker/docker/pkg/ioutils"
"github.com/hashicorp/consul/api"
log "github.com/hashicorp/go-hclog"
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/metricsutil"
+ "github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/listenerutil"
"github.com/hashicorp/vault/internalshared/reloadutil"
physconsul "github.com/hashicorp/vault/physical/consul"
+ "github.com/hashicorp/vault/sdk/physical"
"github.com/hashicorp/vault/sdk/version"
+ sr "github.com/hashicorp/vault/serviceregistration"
srconsul "github.com/hashicorp/vault/serviceregistration/consul"
+ "github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/diagnose"
"github.com/mitchellh/cli"
"github.com/posener/complete"
@@ -19,6 +31,10 @@ import (
const OperatorDiagnoseEnableEnv = "VAULT_DIAGNOSE"
+const CoreUninitializedErr = "diagnose cannot attempt this step because core could not be initialized"
+const BackendUninitializedErr = "diagnose cannot attempt this step because backend could not be initialized"
+const CoreConfigUninitializedErr = "diagnose cannot attempt this step because core config could not be set"
+
var (
_ cli.Command = (*OperatorDiagnoseCommand)(nil)
_ cli.CommandAutocomplete = (*OperatorDiagnoseCommand)(nil)
@@ -33,11 +49,12 @@ type OperatorDiagnoseCommand struct {
flagConfigs []string
cleanupGuard sync.Once
- reloadFuncsLock *sync.RWMutex
- reloadFuncs *map[string][]reloadutil.ReloadFunc
- startedCh chan struct{} // for tests
- reloadedCh chan struct{} // for tests
- skipEndEnd bool // for tests
+ reloadFuncsLock *sync.RWMutex
+ reloadFuncs *map[string][]reloadutil.ReloadFunc
+ ServiceRegistrations map[string]sr.Factory
+ startedCh chan struct{} // for tests
+ reloadedCh chan struct{} // for tests
+ skipEndEnd bool // for tests
}
func (c *OperatorDiagnoseCommand) Synopsis() string {
@@ -95,6 +112,12 @@ func (c *OperatorDiagnoseCommand) Flags() *FlagSets {
Default: false,
Usage: "Dump all information collected by Diagnose.",
})
+
+ f.StringVar(&StringVar{
+ Name: "format",
+ Target: &c.flagFormat,
+ Usage: "The output format",
+ })
return set
}
@@ -118,7 +141,7 @@ func (c *OperatorDiagnoseCommand) Run(args []string) int {
f := c.Flags()
if err := f.Parse(args); err != nil {
c.UI.Error(err.Error())
- return 1
+ return 3
}
return c.RunWithParsedFlags()
}
@@ -127,14 +150,43 @@ func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int {
if len(c.flagConfigs) == 0 {
c.UI.Error("Must specify a configuration file using -config.")
- return 1
+ return 3
}
+ if c.diagnose == nil {
+ if c.flagFormat == "json" {
+ c.diagnose = diagnose.New(&ioutils.NopWriter{})
+ } else {
+ c.UI.Output(version.GetVersion().FullVersionNumber(true))
+ c.diagnose = diagnose.New(os.Stdout)
+ }
+ }
c.UI.Output(version.GetVersion().FullVersionNumber(true))
ctx := diagnose.Context(context.Background(), c.diagnose)
+ c.diagnose.SetSkipList(c.flagSkips)
err := c.offlineDiagnostics(ctx)
+ results := c.diagnose.Finalize(ctx)
+ if c.flagFormat == "json" {
+ resultsJS, err := json.MarshalIndent(results, "", " ")
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error marshalling results: %v", err)
+ return 4
+ }
+ c.UI.Output(string(resultsJS))
+ } else {
+ c.UI.Output("\nResults:")
+ results.Write(os.Stdout)
+ }
+
if err != nil {
+ return 4
+ }
+ // Use a different return code
+ switch results.Status {
+ case diagnose.WarningStatus:
+ return 2
+ case diagnose.ErrorStatus:
return 1
}
return 0
@@ -165,6 +217,9 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error
ctx, span := diagnose.StartSpan(ctx, "initialization")
defer span.End()
+ // OS Specific checks
+ diagnose.OSChecks(ctx)
+
server.flagConfigs = c.flagConfigs
config, err := server.parseConfig()
if err != nil {
@@ -172,119 +227,278 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error
} else {
diagnose.SpotOk(ctx, "parse-config", "")
}
- // Check Listener Information
- // TODO: Run Diagnose checks on the actual net.Listeners
- if err := diagnose.Test(ctx, "init-listeners", func(ctx context.Context) error {
- disableClustering := config.HAStorage.DisableClustering
- infoKeys := make([]string, 0, 10)
- info := make(map[string]string)
- status, lns, _, errMsg := server.InitListeners(config, disableClustering, &infoKeys, &info)
+ var metricSink *metricsutil.ClusterMetricSink
+ var metricsHelper *metricsutil.MetricsHelper
- if status != 0 {
- return errMsg
- }
+ var backend *physical.Backend
+ diagnose.Test(ctx, "storage", func(ctx context.Context) error {
+ diagnose.Test(ctx, "create-storage-backend", func(ctx context.Context) error {
- // Make sure we close all listeners from this point on
- listenerCloseFunc := func() {
- for _, ln := range lns {
- ln.Listener.Close()
+ b, err := server.setupStorage(config)
+ if err != nil {
+ return err
}
+ backend = &b
+ return nil
+ })
+
+ if config.Storage == nil {
+ return fmt.Errorf("no storage stanza found in config")
}
- defer c.cleanupGuard.Do(listenerCloseFunc)
+ if config.Storage != nil && config.Storage.Type == storageTypeConsul {
+ diagnose.Test(ctx, "test-storage-tls-consul", func(ctx context.Context) error {
+ err = physconsul.SetupSecureTLS(api.DefaultConfig(), config.Storage.Config, server.logger, true)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
- sanitizedListeners := make([]listenerutil.Listener, 0, len(config.Listeners))
- for _, ln := range lns {
- if ln.Config.TLSDisable {
- diagnose.Warn(ctx, "TLS is disabled in a Listener config stanza.")
- continue
- }
- if ln.Config.TLSDisableClientCerts {
- diagnose.Warn(ctx, "TLS for a listener is turned on without requiring client certs.")
- }
+ diagnose.Test(ctx, "test-consul-direct-access-storage", func(ctx context.Context) error {
+ dirAccess := diagnose.ConsulDirectAccess(config.Storage.Config)
+ if dirAccess != "" {
+ diagnose.Warn(ctx, dirAccess)
+ }
+ return nil
+ })
+ }
+
+ // Attempt to use storage backend
+ if !c.skipEndEnd {
+ diagnose.Test(ctx, "test-access-storage", diagnose.WithTimeout(30*time.Second, func(ctx context.Context) error {
+ maxDurationCrudOperation := "write"
+ maxDuration := time.Duration(0)
+ uuidSuffix, err := uuid.GenerateUUID()
+ if err != nil {
+ return err
+ }
+ uuid := "diagnose/latency/" + uuidSuffix
+ dur, err := diagnose.EndToEndLatencyCheckWrite(ctx, uuid, *backend)
+ if err != nil {
+ return err
+ }
+ maxDuration = dur
+ dur, err = diagnose.EndToEndLatencyCheckRead(ctx, uuid, *backend)
+ if err != nil {
+ return err
+ }
+ if dur > maxDuration {
+ maxDuration = dur
+ maxDurationCrudOperation = "read"
+ }
+ dur, err = diagnose.EndToEndLatencyCheckDelete(ctx, uuid, *backend)
+ if err != nil {
+ return err
+ }
+ if dur > maxDuration {
+ maxDuration = dur
+ maxDurationCrudOperation = "delete"
+ }
+
+ if maxDuration > time.Duration(0) {
+ diagnose.Warn(ctx, diagnose.LatencyWarning+fmt.Sprintf("duration: %s, ", maxDuration)+fmt.Sprintf("operation: %s", maxDurationCrudOperation))
+ }
+ return nil
+ }))
+ }
+ return nil
+ })
- // Check ciphersuite and load ca/cert/key files
- // TODO: TLSConfig returns a reloadFunc and a TLSConfig. We can use this to
- // perform an active probe.
- _, _, err := listenerutil.TLSConfig(ln.Config, make(map[string]string), c.UI)
+ var configSR sr.ServiceRegistration
+ diagnose.Test(ctx, "service-discovery", func(ctx context.Context) error {
+ if config.ServiceRegistration == nil || config.ServiceRegistration.Config == nil {
+ diagnose.Skipped(ctx, "no service registration configured")
+ return nil
+ }
+ srConfig := config.ServiceRegistration.Config
+
+ diagnose.Test(ctx, "test-serviceregistration-tls-consul", func(ctx context.Context) error {
+ // SetupSecureTLS for service discovery uses the same cert and key to set up physical
+ // storage. See the consul package in physical for details.
+ err = srconsul.SetupSecureTLS(api.DefaultConfig(), srConfig, server.logger, true)
if err != nil {
return err
}
+ return nil
+ })
- sanitizedListeners = append(sanitizedListeners, listenerutil.Listener{
- Listener: ln.Listener,
- Config: ln.Config,
+ if config.ServiceRegistration != nil && config.ServiceRegistration.Type == "consul" {
+ diagnose.Test(ctx, "test-consul-direct-access-service-discovery", func(ctx context.Context) error {
+ dirAccess := diagnose.ConsulDirectAccess(config.ServiceRegistration.Config)
+ if dirAccess != "" {
+ diagnose.Warn(ctx, dirAccess)
+ }
+ return nil
})
}
- return diagnose.ListenerChecks(sanitizedListeners)
- }); err != nil {
- return err
+ return nil
+ })
+
+ sealcontext, sealspan := diagnose.StartSpan(ctx, "create-seal")
+ var seals []vault.Seal
+ var sealConfigError error
+ barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(server, config, make([]string, 0), make(map[string]string))
+ // Check error here
+ if err != nil {
+ diagnose.Fail(sealcontext, err.Error())
+ goto SEALFAIL
+ }
+ if sealConfigError != nil {
+ diagnose.Fail(sealcontext, "seal could not be configured: seals may already be initialized")
+ goto SEALFAIL
+ }
+
+ if seals != nil {
+ for _, seal := range seals {
+ // Ensure that the seal finalizer is called, even if using verify-only
+ defer func(seal *vault.Seal) {
+ sealType := (*seal).BarrierType()
+ finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "finalize-seal-"+sealType)
+ err = (*seal).Finalize(finalizeSealContext)
+ if err != nil {
+ diagnose.Fail(finalizeSealContext, "error finalizing seal")
+ finalizeSealSpan.End()
+ }
+ finalizeSealSpan.End()
+ }(&seal)
+ }
}
- // Errors in these items could stop Vault from starting but are not yet covered:
- // TODO: logging configuration
- // TODO: SetupTelemetry
- if err := diagnose.Test(ctx, "storage", func(ctx context.Context) error {
- b, err := server.setupStorage(config)
+ if barrierSeal == nil {
+ diagnose.Fail(sealcontext, "could not create barrier seal! Most likely proper Seal configuration information was not set, but no error was generated")
+ }
+
+SEALFAIL:
+ sealspan.End()
+ var coreConfig vault.CoreConfig
+ if err := diagnose.Test(ctx, "setup-core", func(ctx context.Context) error {
+ var secureRandomReader io.Reader
+ // prepare a secure random reader for core
+ secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper)
if err != nil {
- return err
+ return diagnose.SpotError(ctx, "init-randreader", err)
}
+ diagnose.SpotOk(ctx, "init-randreader", "")
- dirAccess := diagnose.ConsulDirectAccess(config.HAStorage.Config)
- if dirAccess != "" {
- diagnose.Warn(ctx, dirAccess)
+ if backend == nil {
+ return fmt.Errorf("%s", BackendUninitializedErr)
}
+ coreConfig = createCoreConfig(server, config, *backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader)
+ return nil
+ }); err != nil {
+ diagnose.Error(ctx, err)
+ }
- if config.Storage != nil && config.Storage.Type == storageTypeConsul {
- err = physconsul.SetupSecureTLS(api.DefaultConfig(), config.Storage.Config, server.logger, true)
+ var disableClustering bool
+ diagnose.Test(ctx, "setup-ha-storage", func(ctx context.Context) error {
+ if backend == nil {
+ return fmt.Errorf("%s", BackendUninitializedErr)
+ }
+ diagnose.Test(ctx, "create-ha-storage-backend", func(ctx context.Context) error {
+ // Initialize the separate HA storage backend, if it exists
+ disableClustering, err = initHaBackend(server, config, &coreConfig, *backend)
if err != nil {
return err
}
-
- dirAccess := diagnose.ConsulDirectAccess(config.Storage.Config)
- if dirAccess != "" {
- diagnose.Warn(ctx, dirAccess)
+ return nil
+ })
+ diagnose.Test(ctx, "test-consul-direct-access-storage", func(ctx context.Context) error {
+ if config.HAStorage == nil {
+ diagnose.Skipped(ctx, "no HA storage configured")
+ } else {
+ dirAccess := diagnose.ConsulDirectAccess(config.HAStorage.Config)
+ if dirAccess != "" {
+ diagnose.Warn(ctx, dirAccess)
+ }
}
+ return nil
+ })
+ if config.HAStorage != nil && config.HAStorage.Type == storageTypeConsul {
+ diagnose.Test(ctx, "test-ha-storage-tls-consul", func(ctx context.Context) error {
+ err = physconsul.SetupSecureTLS(api.DefaultConfig(), config.HAStorage.Config, server.logger, true)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
}
+ return nil
+ })
- if config.HAStorage != nil && config.HAStorage.Type == storageTypeConsul {
- err = physconsul.SetupSecureTLS(api.DefaultConfig(), config.HAStorage.Config, server.logger, true)
- if err != nil {
+ // Determine the redirect address from environment variables
+ err = determineRedirectAddr(server, &coreConfig, config)
+ if err != nil {
+ return diagnose.SpotError(ctx, "determine-redirect", err)
+ }
+ diagnose.SpotOk(ctx, "determine-redirect", "")
+
+ err = findClusterAddress(server, &coreConfig, config, disableClustering)
+ if err != nil {
+ return diagnose.SpotError(ctx, "find-cluster-addr", err)
+ }
+ diagnose.SpotOk(ctx, "find-cluster-addr", "")
+
+ var lns []listenerutil.Listener
+ diagnose.Test(ctx, "init-listeners", func(ctx context.Context) error {
+ disableClustering := config.HAStorage != nil && config.HAStorage.DisableClustering
+ infoKeys := make([]string, 0, 10)
+ info := make(map[string]string)
+ var listeners []listenerutil.Listener
+ var status int
+ diagnose.Test(ctx, "create-listeners", func(ctx context.Context) error {
+ status, listeners, _, err = server.InitListeners(config, disableClustering, &infoKeys, &info)
+ if status != 0 {
return err
}
- }
+ return nil
+ })
- // Attempt to use storage backend
- if !c.skipEndEnd {
- err = diagnose.StorageEndToEndLatencyCheck(ctx, b)
- if err != nil {
- return err
+ lns = listeners
+
+ // Make sure we close all listeners from this point on
+ listenerCloseFunc := func() {
+ for _, ln := range lns {
+ ln.Listener.Close()
}
}
- return nil
- }); err != nil {
- return err
- }
+ defer c.cleanupGuard.Do(listenerCloseFunc)
- return diagnose.Test(ctx, "service-discovery", func(ctx context.Context) error {
- srConfig := config.ServiceRegistration.Config
- // Initialize the Service Discovery, if there is one
- if config.ServiceRegistration != nil && config.ServiceRegistration.Type == "consul" {
- // setupStorage populates the srConfig, so no nil checks are necessary.
- dirAccess := diagnose.ConsulDirectAccess(config.ServiceRegistration.Config)
- if dirAccess != "" {
- diagnose.Warn(ctx, dirAccess)
+ diagnose.Test(ctx, "check-listener-tls", func(ctx context.Context) error {
+ sanitizedListeners := make([]listenerutil.Listener, 0, len(config.Listeners))
+ for _, ln := range lns {
+ if ln.Config.TLSDisable {
+ diagnose.Warn(ctx, "TLS is disabled in a Listener config stanza.")
+ continue
+ }
+ if ln.Config.TLSDisableClientCerts {
+ diagnose.Warn(ctx, "TLS for a listener is turned on without requiring client certs.")
+ }
+
+ // Check ciphersuite and load ca/cert/key files
+ // TODO: TLSConfig returns a reloadFunc and a TLSConfig. We can use this to
+ // perform an active probe.
+ _, _, err := listenerutil.TLSConfig(ln.Config, make(map[string]string), c.UI)
+ if err != nil {
+ return err
+ }
+
+ sanitizedListeners = append(sanitizedListeners, listenerutil.Listener{
+ Listener: ln.Listener,
+ Config: ln.Config,
+ })
}
-
- // SetupSecureTLS for service discovery uses the same cert and key to set up physical
- // storage. See the consul package in physical for details.
- err = srconsul.SetupSecureTLS(api.DefaultConfig(), srConfig, server.logger, true)
+ err = diagnose.ListenerChecks(sanitizedListeners)
if err != nil {
return err
}
- }
+ return nil
+ })
return nil
})
+
+ // TODO: Diagnose logging configuration
+ return nil
}
diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go
index 42f58e65a6ed7..4cd9183a785ef 100644
--- a/command/operator_diagnose_test.go
+++ b/command/operator_diagnose_test.go
@@ -5,6 +5,7 @@ package command
import (
"context"
"fmt"
+ "io/ioutil"
"strings"
"testing"
@@ -17,7 +18,7 @@ func testOperatorDiagnoseCommand(tb testing.TB) *OperatorDiagnoseCommand {
ui := cli.NewMockUi()
return &OperatorDiagnoseCommand{
- diagnose: diagnose.New(),
+ diagnose: diagnose.New(ioutil.Discard),
BaseCommand: &BaseCommand{
UI: ui,
},
@@ -45,17 +46,37 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
{
Name: "init-listeners",
Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
+ Children: []*diagnose.Result{
+ {
+ Name: "create-listeners",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "check-listener-tls",
+ Status: diagnose.WarningStatus,
+ Warnings: []string{
+ "TLS is disabled in a Listener config stanza.",
+ },
+ },
},
},
{
Name: "storage",
Status: diagnose.OkStatus,
- },
- {
- Name: "service-discovery",
- Status: diagnose.OkStatus,
+ Children: []*diagnose.Result{
+ {
+ Name: "create-storage-backend",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-storage-tls-consul",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-consul-direct-access-storage",
+ Status: diagnose.OkStatus,
+ },
+ },
},
},
},
@@ -65,21 +86,16 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
"-config", "./server/test-fixtures/nostore_config.hcl",
},
[]*diagnose.Result{
- {
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
- {
- Name: "init-listeners",
- Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
- },
- },
{
Name: "storage",
Status: diagnose.ErrorStatus,
- Message: "A storage backend must be specified",
+ Message: "no storage stanza found in config",
+ Children: []*diagnose.Result{
+ {
+ Name: "create-storage-backend",
+ Status: diagnose.ErrorStatus,
+ },
+ },
},
},
},
@@ -89,21 +105,19 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
"-config", "./server/test-fixtures/tls_config_ok.hcl",
},
[]*diagnose.Result{
- {
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
{
Name: "init-listeners",
Status: diagnose.OkStatus,
- },
- {
- Name: "storage",
- Status: diagnose.OkStatus,
- },
- {
- Name: "service-discovery",
- Status: diagnose.OkStatus,
+ Children: []*diagnose.Result{
+ {
+ Name: "create-listeners",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "check-listener-tls",
+ Status: diagnose.OkStatus,
+ },
+ },
},
},
},
@@ -113,20 +127,24 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
"-config", "./server/test-fixtures/config_bad_https_storage.hcl",
},
[]*diagnose.Result{
- {
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
- {
- Name: "init-listeners",
- Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
- },
- },
{
Name: "storage",
Status: diagnose.ErrorStatus,
+ Children: []*diagnose.Result{
+ {
+ Name: "create-storage-backend",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-storage-tls-consul",
+ Status: diagnose.ErrorStatus,
+ Message: "expired",
+ },
+ {
+ Name: "test-consul-direct-access-storage",
+ Status: diagnose.OkStatus,
+ },
+ },
},
},
},
@@ -137,23 +155,52 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
},
[]*diagnose.Result{
{
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
- {
- Name: "init-listeners",
+ Name: "storage",
Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
+ Children: []*diagnose.Result{
+ {
+ Name: "create-storage-backend",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-storage-tls-consul",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-consul-direct-access-storage",
+ Status: diagnose.WarningStatus,
+ Warnings: []string{
+ "consul storage does not connect to local agent, but directly to server",
+ },
+ },
},
},
{
- Name: "storage",
+ Name: "setup-ha-storage",
Status: diagnose.ErrorStatus,
- Warnings: []string{
- diagnose.AddrDNExistErr,
+ Children: []*diagnose.Result{
+ {
+ Name: "create-ha-storage-backend",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-consul-direct-access-storage",
+ Status: diagnose.WarningStatus,
+ Warnings: []string{
+ "consul storage does not connect to local agent, but directly to server",
+ },
+ },
+ {
+ Name: "test-ha-storage-tls-consul",
+ Status: diagnose.ErrorStatus,
+ Message: "x509: certificate has expired or is not yet valid",
+ },
},
},
+ {
+ Name: "find-cluster-addr",
+ Status: diagnose.ErrorStatus,
+ },
},
},
{
@@ -163,26 +210,21 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
},
[]*diagnose.Result{
{
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
- {
- Name: "init-listeners",
- Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
- },
- },
- {
- Name: "storage",
- Status: diagnose.OkStatus,
- },
- {
- Name: "service-discovery",
- Status: diagnose.ErrorStatus,
- Message: "failed to verify certificate: x509: certificate has expired or is not yet valid:",
- Warnings: []string{
- diagnose.DirAccessErr,
+ Name: "service-discovery",
+ Status: diagnose.ErrorStatus,
+ Children: []*diagnose.Result{
+ {
+ Name: "test-serviceregistration-tls-consul",
+ Status: diagnose.ErrorStatus,
+ Message: "failed to verify certificate: x509: certificate has expired or is not yet valid",
+ },
+ {
+ Name: "test-consul-direct-access-service-discovery",
+ Status: diagnose.WarningStatus,
+ Warnings: []string{
+ diagnose.DirAccessErr,
+ },
+ },
},
},
},
@@ -193,28 +235,27 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
"-config", "./server/test-fixtures/diagnose_ok_storage_direct_access.hcl",
},
[]*diagnose.Result{
- {
- Name: "parse-config",
- Status: diagnose.OkStatus,
- },
- {
- Name: "init-listeners",
- Status: diagnose.WarningStatus,
- Warnings: []string{
- "TLS is disabled in a Listener config stanza.",
- },
- },
{
Name: "storage",
Status: diagnose.WarningStatus,
- Warnings: []string{
- diagnose.DirAccessErr,
+ Children: []*diagnose.Result{
+ {
+ Name: "create-storage-backend",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-storage-tls-consul",
+ Status: diagnose.OkStatus,
+ },
+ {
+ Name: "test-consul-direct-access-storage",
+ Status: diagnose.WarningStatus,
+ Warnings: []string{
+ diagnose.DirAccessErr,
+ },
+ },
},
},
- {
- Name: "service-discovery",
- Status: diagnose.OkStatus,
- },
},
},
}
@@ -224,7 +265,6 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
for _, tc := range cases {
tc := tc
-
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
client, closer := testVaultServer(t)
@@ -236,18 +276,35 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) {
cmd.Run(tc.args)
result := cmd.diagnose.Finalize(context.Background())
- for i, exp := range tc.expected {
- act := result.Children[i]
- if err := compareResult(t, exp, act); err != nil {
- t.Fatalf("%v", err)
- }
+ if err := compareResults(tc.expected, result.Children); err != nil {
+ t.Fatalf("Did not find expected test results: %v", err)
}
})
}
})
}
-func compareResult(t *testing.T, exp *diagnose.Result, act *diagnose.Result) error {
+func compareResults(expected []*diagnose.Result, actual []*diagnose.Result) error {
+ for _, exp := range expected {
+ found := false
+ // Check them all so we don't have to be order specific
+ for _, act := range actual {
+ if exp.Name == act.Name {
+ found = true
+ if err := compareResult(exp, act); err != nil {
+ return err
+ }
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("could not find expected test result: %s", exp.Name)
+ }
+ }
+ return nil
+}
+
+func compareResult(exp *diagnose.Result, act *diagnose.Result) error {
if exp.Name != act.Name {
return fmt.Errorf("names mismatch: %s vs %s", exp.Name, act.Name)
}
@@ -269,8 +326,17 @@ func compareResult(t *testing.T, exp *diagnose.Result, act *diagnose.Result) err
return fmt.Errorf("section %s, warning message not found: %s in %s", exp.Name, exp.Warnings[j], act.Warnings[j])
}
}
- if len(exp.Children) != len(act.Children) {
- return fmt.Errorf("section %s, child count mismatch: %d vs %d", exp.Name, len(exp.Children), len(act.Children))
+ if len(exp.Children) > len(act.Children) {
+ errStrings := []string{}
+ for _, c := range act.Children {
+ errStrings = append(errStrings, fmt.Sprintf("%+v", c))
+ }
+ return fmt.Errorf(strings.Join(errStrings, ","))
}
+
+ if len(exp.Children) > 0 {
+ return compareResults(exp.Children, act.Children)
+ }
+
return nil
}
diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go
index a9f9117feb8cb..eb44fece68ba1 100644
--- a/command/operator_generate_root.go
+++ b/command/operator_generate_root.go
@@ -9,7 +9,6 @@ import (
"os"
"strings"
- "github.com/hashicorp/errwrap"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/pgpkeys"
@@ -310,7 +309,7 @@ func (c *OperatorGenerateRootCommand) generateOTP(client *api.Client, kind gener
default:
otp, err := base62.Random(status.OTPLength)
if err != nil {
- c.UI.Error(errwrap.Wrapf("Error reading random bytes: {{err}}", err).Error())
+ c.UI.Error(fmt.Errorf("Error reading random bytes: %w", err).Error())
return "", 2
}
@@ -363,13 +362,13 @@ func (c *OperatorGenerateRootCommand) decode(client *api.Client, encoded, otp st
default:
tokenBytes, err := base64.RawStdEncoding.DecodeString(encoded)
if err != nil {
- c.UI.Error(errwrap.Wrapf("Error decoding base64'd token: {{err}}", err).Error())
+ c.UI.Error(fmt.Errorf("Error decoding base64'd token: %w", err).Error())
return 1
}
tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
if err != nil {
- c.UI.Error(errwrap.Wrapf("Error xoring token: {{err}}", err).Error())
+ c.UI.Error(fmt.Errorf("Error xoring token: %w", err).Error())
return 1
}
token = string(tokenBytes)
diff --git a/command/operator_migrate.go b/command/operator_migrate.go
index c243864e4c140..1931584bc3f44 100644
--- a/command/operator_migrate.go
+++ b/command/operator_migrate.go
@@ -10,7 +10,6 @@ import (
"strings"
"time"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
@@ -150,24 +149,24 @@ func (c *OperatorMigrateCommand) Run(args []string) int {
func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
if err != nil {
- return errwrap.Wrapf("error mounting 'storage_source': {{err}}", err)
+ return fmt.Errorf("error mounting 'storage_source': %w", err)
}
if c.flagReset {
if err := SetStorageMigration(from, false); err != nil {
- return errwrap.Wrapf("error resetting migration lock: {{err}}", err)
+ return fmt.Errorf("error resetting migration lock: %w", err)
}
return nil
}
to, err := c.createDestinationBackend(config.StorageDestination.Type, config.StorageDestination.Config, config)
if err != nil {
- return errwrap.Wrapf("error mounting 'storage_destination': {{err}}", err)
+ return fmt.Errorf("error mounting 'storage_destination': %w", err)
}
migrationStatus, err := CheckStorageMigration(from)
if err != nil {
- return errwrap.Wrapf("error checking migration status: {{err}}", err)
+ return fmt.Errorf("error checking migration status: %w", err)
}
if migrationStatus != nil {
@@ -181,7 +180,7 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
// it.
default:
if err := SetStorageMigration(from, true); err != nil {
- return errwrap.Wrapf("error setting migration lock: {{err}}", err)
+ return fmt.Errorf("error setting migration lock: %w", err)
}
defer SetStorageMigration(from, false)
@@ -215,7 +214,7 @@ func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.B
entry, err := from.Get(ctx, path)
if err != nil {
- return errwrap.Wrapf("error reading entry: {{err}}", err)
+ return fmt.Errorf("error reading entry: %w", err)
}
if entry == nil {
@@ -223,7 +222,7 @@ func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.B
}
if err := to.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("error writing entry: {{err}}", err)
+ return fmt.Errorf("error writing entry: %w", err)
}
c.logger.Info("copied key", "path", path)
return nil
@@ -258,7 +257,7 @@ func (c *OperatorMigrateCommand) createDestinationBackend(kind string, conf map[
parsedClusterAddr, err := url.Parse(config.ClusterAddr)
if err != nil {
- return nil, errwrap.Wrapf("error parsing cluster address: {{err}}", err)
+ return nil, fmt.Errorf("error parsing cluster address: %w", err)
}
if err := raftStorage.Bootstrap([]raft.Peer{
{
@@ -266,13 +265,13 @@ func (c *OperatorMigrateCommand) createDestinationBackend(kind string, conf map[
Address: parsedClusterAddr.Host,
},
}); err != nil {
- return nil, errwrap.Wrapf("could not bootstrap clustered storage: {{err}}", err)
+ return nil, fmt.Errorf("could not bootstrap clustered storage: %w", err)
}
if err := raftStorage.SetupCluster(context.Background(), raft.SetupOpts{
StartAsLeader: true,
}); err != nil {
- return nil, errwrap.Wrapf("could not start clustered storage: {{err}}", err)
+ return nil, fmt.Errorf("could not start clustered storage: %w", err)
}
}
@@ -318,7 +317,7 @@ func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfi
}
if err := parseStorage(&result, o, stanza); err != nil {
- return nil, errwrap.Wrapf("error parsing '%s': {{err}}", err)
+ return nil, fmt.Errorf("error parsing '%s': %w", stanza, err)
}
}
return &result, nil
@@ -355,7 +354,7 @@ func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.C
if key == "" || strings.HasSuffix(key, "/") {
children, err := source.List(ctx, key)
if err != nil {
- return errwrap.Wrapf("failed to scan for children: {{err}}", err)
+ return fmt.Errorf("failed to scan for children: %w", err)
}
sort.Strings(children)
diff --git a/command/server.go b/command/server.go
index 103294942e79c..6a1de0e900856 100644
--- a/command/server.go
+++ b/command/server.go
@@ -410,7 +410,7 @@ func (c *ServerCommand) parseConfig() (*server.Config, error) {
for _, path := range c.flagConfigs {
current, err := server.LoadConfig(path)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error loading configuration from %s: {{err}}", path), err)
+ return nil, fmt.Errorf("error loading configuration from %s: %w", path, err)
}
if config == nil {
@@ -875,6 +875,33 @@ func (c *ServerCommand) setupStorage(config *server.Config) (physical.Backend, e
return backend, nil
}
+func beginServiceRegistration(c *ServerCommand, config *server.Config) (sr.ServiceRegistration, error) {
+ sdFactory, ok := c.ServiceRegistrations[config.ServiceRegistration.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown service_registration type %s", config.ServiceRegistration.Type)
+ }
+
+ namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type)
+ c.allLoggers = append(c.allLoggers, namedSDLogger)
+
+ // Since we haven't even begun starting Vault's core yet,
+ // we know that Vault is in its pre-running state.
+ state := sr.State{
+ VaultVersion: version.GetVersion().VersionNumber(),
+ IsInitialized: false,
+ IsSealed: true,
+ IsActive: false,
+ IsPerformanceStandby: false,
+ }
+ var err error
+ configSR, err := sdFactory(config.ServiceRegistration.Config, namedSDLogger, state)
+ if err != nil {
+ return nil, fmt.Errorf("Error initializing service_registration of type %s: %s", config.ServiceRegistration.Type, err)
+ }
+
+ return configSR, nil
+}
+
// InitListeners returns a response code, error message, Listeners, and a TCP Address list.
func (c *ServerCommand) InitListeners(config *server.Config, disableClustering bool, infoKeys *[]string, info *map[string]string) (int, []listenerutil.Listener, []*net.TCPAddr, error) {
clusterAddrs := []*net.TCPAddr{}
@@ -1170,27 +1197,9 @@ func (c *ServerCommand) Run(args []string) int {
// Initialize the Service Discovery, if there is one
var configSR sr.ServiceRegistration
if config.ServiceRegistration != nil {
- sdFactory, ok := c.ServiceRegistrations[config.ServiceRegistration.Type]
- if !ok {
- c.UI.Error(fmt.Sprintf("Unknown service_registration type %s", config.ServiceRegistration.Type))
- return 1
- }
-
- namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type)
- c.allLoggers = append(c.allLoggers, namedSDLogger)
-
- // Since we haven't even begun starting Vault's core yet,
- // we know that Vault is in its pre-running state.
- state := sr.State{
- VaultVersion: version.GetVersion().VersionNumber(),
- IsInitialized: false,
- IsSealed: true,
- IsActive: false,
- IsPerformanceStandby: false,
- }
- configSR, err = sdFactory(config.ServiceRegistration.Config, namedSDLogger, state)
+ configSR, err = beginServiceRegistration(c, config)
if err != nil {
- c.UI.Error(fmt.Sprintf("Error initializing service_registration of type %s: %s", config.ServiceRegistration.Type, err))
+ c.UI.Output(err.Error())
return 1
}
}
@@ -1199,83 +1208,22 @@ func (c *ServerCommand) Run(args []string) int {
info := make(map[string]string)
info["log level"] = logLevelString
infoKeys = append(infoKeys, "log level")
+ barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(c, config, infoKeys, info)
+ // Surface any error from seal construction before proceeding (sealConfigError is handled separately below)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
- var barrierSeal vault.Seal
- var unwrapSeal vault.Seal
-
- var sealConfigError error
- var wrapper wrapping.Wrapper
- var barrierWrapper wrapping.Wrapper
- if c.flagDevAutoSeal {
- barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil))
- } else {
- // Handle the case where no seal is provided
- switch len(config.Seals) {
- case 0:
- config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
- case 1:
- // If there's only one seal and it's disabled assume they want to
- // migrate to a shamir seal and simply didn't provide it
- if config.Seals[0].Disabled {
- config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
- }
- }
- for _, configSeal := range config.Seals {
- sealType := wrapping.Shamir
- if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" {
- sealType = os.Getenv("VAULT_SEAL_TYPE")
- configSeal.Type = sealType
- } else {
- sealType = configSeal.Type
- }
-
- var seal vault.Seal
- sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
- c.allLoggers = append(c.allLoggers, sealLogger)
- defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{
- Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
- Logger: c.logger.Named("shamir"),
- }),
- })
- var sealInfoKeys []string
- sealInfoMap := map[string]string{}
- wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger)
- if sealConfigError != nil {
- if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
- c.UI.Error(fmt.Sprintf(
- "Error parsing Seal configuration: %s", sealConfigError))
- return 1
- }
- }
- if wrapper == nil {
- seal = defaultSeal
- } else {
- seal = vault.NewAutoSeal(&vaultseal.Access{
- Wrapper: wrapper,
- })
- }
-
- infoPrefix := ""
- if configSeal.Disabled {
- unwrapSeal = seal
- infoPrefix = "Old "
- } else {
- barrierSeal = seal
- barrierWrapper = wrapper
- }
- for _, k := range sealInfoKeys {
- infoKeys = append(infoKeys, infoPrefix+k)
- info[infoPrefix+k] = sealInfoMap[k]
- }
-
+ if seals != nil {
+ for _, seal := range seals {
// Ensure that the seal finalizer is called, even if using verify-only
- defer func() {
- err = seal.Finalize(context.Background())
+ defer func(seal *vault.Seal) {
+ err = (*seal).Finalize(context.Background())
if err != nil {
c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err))
}
- }()
-
+ }(&seal)
}
}
@@ -1291,235 +1239,36 @@ func (c *ServerCommand) Run(args []string) int {
return 1
}
- coreConfig := &vault.CoreConfig{
- RawConfig: config,
- Physical: backend,
- RedirectAddr: config.Storage.RedirectAddr,
- StorageType: config.Storage.Type,
- HAPhysical: nil,
- ServiceRegistration: configSR,
- Seal: barrierSeal,
- UnwrapSeal: unwrapSeal,
- AuditBackends: c.AuditBackends,
- CredentialBackends: c.CredentialBackends,
- LogicalBackends: c.LogicalBackends,
- Logger: c.logger,
- DisableSentinelTrace: config.DisableSentinelTrace,
- DisableCache: config.DisableCache,
- DisableMlock: config.DisableMlock,
- MaxLeaseTTL: config.MaxLeaseTTL,
- DefaultLeaseTTL: config.DefaultLeaseTTL,
- ClusterName: config.ClusterName,
- CacheSize: config.CacheSize,
- PluginDirectory: config.PluginDirectory,
- EnableUI: config.EnableUI,
- EnableRaw: config.EnableRawEndpoint,
- DisableSealWrap: config.DisableSealWrap,
- DisablePerformanceStandby: config.DisablePerformanceStandby,
- DisableIndexing: config.DisableIndexing,
- AllLoggers: c.allLoggers,
- BuiltinRegistry: builtinplugins.Registry,
- DisableKeyEncodingChecks: config.DisablePrintableCheck,
- MetricsHelper: metricsHelper,
- MetricSink: metricSink,
- SecureRandomReader: secureRandomReader,
- EnableResponseHeaderHostname: config.EnableResponseHeaderHostname,
- EnableResponseHeaderRaftNodeID: config.EnableResponseHeaderRaftNodeID,
- License: config.License,
- LicensePath: config.LicensePath,
- }
- if c.flagDev {
- coreConfig.EnableRaw = true
- coreConfig.DevToken = c.flagDevRootTokenID
- if c.flagDevLeasedKV {
- coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
- }
- if c.flagDevPluginDir != "" {
- coreConfig.PluginDirectory = c.flagDevPluginDir
- }
- if c.flagDevLatency > 0 {
- injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond
- if _, txnOK := backend.(physical.Transactional); txnOK {
- coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
- } else {
- coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
- }
- }
- }
-
+ coreConfig := createCoreConfig(c, config, backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader)
if c.flagDevThreeNode {
- return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
+ return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
}
if c.flagDevFourCluster {
- return enableFourClusterDev(c, coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
+ return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
}
- var disableClustering bool
-
// Initialize the separate HA storage backend, if it exists
- var ok bool
- if config.HAStorage != nil {
- if config.Storage.Type == storageTypeRaft && config.HAStorage.Type == storageTypeRaft {
- c.UI.Error("Raft cannot be set both as 'storage' and 'ha_storage'. Setting 'storage' to 'raft' will automatically set it up for HA operations as well")
- return 1
- }
-
- if config.Storage.Type == storageTypeRaft {
- c.UI.Error("HA storage cannot be declared when Raft is the storage type")
- return 1
- }
-
- factory, exists := c.PhysicalBackends[config.HAStorage.Type]
- if !exists {
- c.UI.Error(fmt.Sprintf("Unknown HA storage type %s", config.HAStorage.Type))
- return 1
-
- }
-
- namedHALogger := c.logger.Named("ha." + config.HAStorage.Type)
- c.allLoggers = append(c.allLoggers, namedHALogger)
- habackend, err := factory(config.HAStorage.Config, namedHALogger)
- if err != nil {
- c.UI.Error(fmt.Sprintf(
- "Error initializing HA storage of type %s: %s", config.HAStorage.Type, err))
- return 1
-
- }
-
- if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
- c.UI.Error("Specified HA storage does not support HA")
- return 1
- }
-
- if !coreConfig.HAPhysical.HAEnabled() {
- c.UI.Error("Specified HA storage has HA support disabled; please consult documentation")
- return 1
- }
-
- coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
- disableClustering = config.HAStorage.DisableClustering
-
- if config.HAStorage.Type == storageTypeRaft && disableClustering {
- c.UI.Error("Disable clustering cannot be set to true when Raft is the HA storage type")
- return 1
- }
-
- if !disableClustering {
- coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
- }
- } else {
- if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
- coreConfig.RedirectAddr = config.Storage.RedirectAddr
- disableClustering = config.Storage.DisableClustering
-
- if (config.Storage.Type == storageTypeRaft) && disableClustering {
- c.UI.Error("Disable clustering cannot be set to true when Raft is the storage type")
- return 1
- }
-
- if !disableClustering {
- coreConfig.ClusterAddr = config.Storage.ClusterAddr
- }
- }
- }
-
- if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" {
- coreConfig.RedirectAddr = envRA
- } else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
- coreConfig.RedirectAddr = envRA
- } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
- coreConfig.RedirectAddr = envAA
+ disableClustering, err := initHaBackend(c, config, &coreConfig, backend)
+ if err != nil {
+ c.UI.Output(err.Error())
+ return 1
}
- // Attempt to detect the redirect address, if possible
- if coreConfig.RedirectAddr == "" {
- c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; falling back to detection if possible, but this value should be manually set")
- }
- var detect physical.RedirectDetect
- if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
- detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
- } else {
- detect, ok = coreConfig.Physical.(physical.RedirectDetect)
- }
- if ok && coreConfig.RedirectAddr == "" {
- redirect, err := c.detectRedirect(detect, config)
- if err != nil {
- c.UI.Error(fmt.Sprintf("Error detecting api address: %s", err))
- } else if redirect == "" {
- c.UI.Error("Failed to detect api address")
- } else {
- coreConfig.RedirectAddr = redirect
- }
- }
- if coreConfig.RedirectAddr == "" && c.flagDev {
- coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Address)
+ // Determine the redirect address from environment variables
+ err = determineRedirectAddr(c, &coreConfig, config)
+ if err != nil {
+ c.UI.Output(err.Error())
}
// After the redirect bits are sorted out, if no cluster address was
// explicitly given, derive one from the redirect addr
- if disableClustering {
- coreConfig.ClusterAddr = ""
- } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
- coreConfig.ClusterAddr = envCA
- } else {
- var addrToUse string
- switch {
- case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
- addrToUse = coreConfig.RedirectAddr
- case c.flagDev:
- addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Address)
- default:
- goto CLUSTER_SYNTHESIS_COMPLETE
- }
- u, err := url.ParseRequestURI(addrToUse)
- if err != nil {
- c.UI.Error(fmt.Sprintf(
- "Error parsing synthesized cluster address %s: %v", addrToUse, err))
- return 1
- }
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- // This sucks, as it's a const in the function but not exported in the package
- if strings.Contains(err.Error(), "missing port in address") {
- host = u.Host
- port = "443"
- } else {
- c.UI.Error(fmt.Sprintf("Error parsing api address: %v", err))
- return 1
- }
- }
- nPort, err := strconv.Atoi(port)
- if err != nil {
- c.UI.Error(fmt.Sprintf(
- "Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err))
- return 1
- }
- u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
- // Will always be TLS-secured
- u.Scheme = "https"
- coreConfig.ClusterAddr = u.String()
- }
-
-CLUSTER_SYNTHESIS_COMPLETE:
-
- if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 {
- c.UI.Error(fmt.Sprintf(
- "Address %q used for both API and cluster addresses", coreConfig.RedirectAddr))
+ err = findClusterAddress(c, &coreConfig, config, disableClustering)
+ if err != nil {
+ c.UI.Output(err.Error())
return 1
}
- if coreConfig.ClusterAddr != "" {
- // Force https as we'll always be TLS-secured
- u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
- if err != nil {
- c.UI.Error(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err))
- return 11
- }
- u.Scheme = "https"
- coreConfig.ClusterAddr = u.String()
- }
-
// Override the UI enabling config by the environment variable
if enableUI := os.Getenv("VAULT_UI"); enableUI != "" {
var err error
@@ -1538,15 +1287,20 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
// Apply any enterprise configuration onto the coreConfig.
- adjustCoreConfigForEnt(config, coreConfig)
+ adjustCoreConfigForEnt(config, &coreConfig)
// Initialize the core
- core, newCoreError := vault.NewCore(coreConfig)
+ core, newCoreError := vault.NewCore(&coreConfig)
if newCoreError != nil {
if vault.IsFatalError(newCoreError) {
c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError))
return 1
}
+ c.UI.Warn(wrapAtLength(
+ "WARNING! A non-fatal error occurred during initialization. Please " +
+ "check the logs for more information."))
+ c.UI.Warn("")
+
}
// Copy the reload funcs pointers back
@@ -1650,27 +1404,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
// uninitialized. Once one server initializes the storage backend, this
// goroutine will pick up the unseal keys and unseal this instance.
if !core.IsInSealMigrationMode() {
- go func() {
- for {
- err := core.UnsealWithStoredKeys(context.Background())
- if err == nil {
- return
- }
-
- if vault.IsFatalError(err) {
- c.logger.Error("error unsealing core", "error", err)
- return
- } else {
- c.logger.Warn("failed to unseal core", "error", err)
- }
-
- select {
- case <-c.ShutdownCh:
- return
- case <-time.After(5 * time.Second):
- }
- }
- }()
+ go runUnseal(c, core, context.Background())
}
// When the underlying storage is raft, kick off retry join if it was specified
@@ -1689,176 +1423,24 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.WaitGroup = &sync.WaitGroup{}
// If service discovery is available, run service discovery
- if sd := coreConfig.GetServiceRegistration(); sd != nil {
- if err := configSR.Run(c.ShutdownCh, c.WaitGroup, coreConfig.RedirectAddr); err != nil {
- c.UI.Error(fmt.Sprintf("Error running service_registration of type %s: %s", config.ServiceRegistration.Type, err))
- return 1
- }
+ err = runListeners(c, &coreConfig, config, configSR)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
}
// If we're in Dev mode, then initialize the core
- if c.flagDev && !c.flagDevSkipInit {
-
- init, err := c.enableDev(core, coreConfig)
- if err != nil {
- c.UI.Error(fmt.Sprintf("Error initializing Dev mode: %s", err))
- return 1
- }
-
- var plugins, pluginsNotLoaded []string
- if c.flagDevPluginDir != "" && c.flagDevPluginInit {
-
- f, err := os.Open(c.flagDevPluginDir)
- if err != nil {
- c.UI.Error(fmt.Sprintf("Error reading plugin dir: %s", err))
- return 1
- }
-
- list, err := f.Readdirnames(0)
- f.Close()
- if err != nil {
- c.UI.Error(fmt.Sprintf("Error listing plugins: %s", err))
- return 1
- }
-
- for _, name := range list {
- path := filepath.Join(f.Name(), name)
- if err := c.addPlugin(path, init.RootToken, core); err != nil {
- if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) {
- c.UI.Error(fmt.Sprintf("Error enabling plugin %s: %s", name, err))
- return 1
- }
- pluginsNotLoaded = append(pluginsNotLoaded, name)
- continue
- }
- plugins = append(plugins, name)
- }
-
- sort.Strings(plugins)
- }
-
- var qw *quiescenceSink
- var qwo sync.Once
- qw = &quiescenceSink{
- t: time.AfterFunc(100*time.Millisecond, func() {
- qwo.Do(func() {
- c.logger.DeregisterSink(qw)
-
- // Print the big dev mode warning!
- c.UI.Warn(wrapAtLength(
- "WARNING! dev mode is enabled! In this mode, Vault runs entirely " +
- "in-memory and starts unsealed with a single unseal key. The root " +
- "token is already authenticated to the CLI, so you can immediately " +
- "begin using Vault."))
- c.UI.Warn("")
- c.UI.Warn("You may need to set the following environment variable:")
- c.UI.Warn("")
-
- endpointURL := "http://" + config.Listeners[0].Address
- if runtime.GOOS == "windows" {
- c.UI.Warn("PowerShell:")
- c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL))
- c.UI.Warn("cmd.exe:")
- c.UI.Warn(fmt.Sprintf(" set VAULT_ADDR=%s", endpointURL))
- } else {
- c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL))
- }
-
- // Unseal key is not returned if stored shares is supported
- if len(init.SecretShares) > 0 {
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "The unseal key and root token are displayed below in case you want " +
- "to seal/unseal the Vault or re-authenticate."))
- c.UI.Warn("")
- c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0])))
- }
-
- if len(init.RecoveryShares) > 0 {
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "The recovery key and root token are displayed below in case you want " +
- "to seal/unseal the Vault or re-authenticate."))
- c.UI.Warn("")
- c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0])))
- }
-
- c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken))
-
- if len(plugins) > 0 {
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "The following dev plugins are registered in the catalog:"))
- for _, p := range plugins {
- c.UI.Warn(fmt.Sprintf(" - %s", p))
- }
- }
-
- if len(pluginsNotLoaded) > 0 {
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "The following dev plugins FAILED to be registered in the catalog due to unknown type:"))
- for _, p := range pluginsNotLoaded {
- c.UI.Warn(fmt.Sprintf(" - %s", p))
- }
- }
-
- c.UI.Warn("")
- c.UI.Warn(wrapAtLength(
- "Development mode should NOT be used in production installations!"))
- c.UI.Warn("")
- })
- }),
- }
- c.logger.RegisterSink(qw)
+ err = initDevCore(c, &coreConfig, config, core)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
}
// Initialize the HTTP servers
- for _, ln := range lns {
- if ln.Config == nil {
- c.UI.Error("Found nil listener config after parsing")
- return 1
- }
- handler := vaulthttp.Handler(&vault.HandlerProperties{
- Core: core,
- ListenerConfig: ln.Config,
- DisablePrintableCheck: config.DisablePrintableCheck,
- RecoveryMode: c.flagRecovery,
- })
-
- if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 {
- handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config)
- }
-
- // server defaults
- server := &http.Server{
- Handler: handler,
- ReadHeaderTimeout: 10 * time.Second,
- ReadTimeout: 30 * time.Second,
- IdleTimeout: 5 * time.Minute,
- ErrorLog: c.logger.StandardLogger(nil),
- }
-
- // override server defaults with config values for read/write/idle timeouts if configured
- if ln.Config.HTTPReadHeaderTimeout > 0 {
- server.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout
- }
- if ln.Config.HTTPReadTimeout > 0 {
- server.ReadTimeout = ln.Config.HTTPReadTimeout
- }
- if ln.Config.HTTPWriteTimeout > 0 {
- server.WriteTimeout = ln.Config.HTTPWriteTimeout
- }
- if ln.Config.HTTPIdleTimeout > 0 {
- server.IdleTimeout = ln.Config.HTTPIdleTimeout
- }
-
- // server config tests can exit now
- if c.flagTestServerConfig {
- continue
- }
-
- go server.Serve(ln.Listener)
+ err = startHttpServers(c, core, config, lns)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 1
}
if c.flagTestServerConfig {
@@ -1877,13 +1459,6 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
}
- if newCoreError != nil {
- c.UI.Warn(wrapAtLength(
- "WARNING! A non-fatal error occurred during initialization. Please " +
- "check the logs for more information."))
- c.UI.Warn("")
- }
-
// Output the header that the server has started
if !c.flagCombineLogs {
c.UI.Output("==> Vault server started! Log data will stream in below:\n")
@@ -1987,6 +1562,11 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
}
+ // Reload license file
+ if err := vault.LicenseReload(core); err != nil {
+ c.UI.Error(fmt.Sprintf("Error reloading license: %v", err))
+ }
+
case <-c.SigUSR2Ch:
logWriter := c.logger.StandardWriter(&hclog.StandardLoggerOptions{})
pprof.Lookup("goroutine").WriteTo(logWriter, 2)
@@ -2075,7 +1655,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
isLeader, _, _, err := core.Leader()
if err != nil && err != vault.ErrHANotEnabled {
- return nil, errwrap.Wrapf("failed to check active status: {{err}}", err)
+ return nil, fmt.Errorf("failed to check active status: %w", err)
}
if err == nil {
leaderCount := 5
@@ -2088,7 +1668,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
time.Sleep(1 * time.Second)
isLeader, _, _, err = core.Leader()
if err != nil {
- return nil, errwrap.Wrapf("failed to check active status: {{err}}", err)
+ return nil, fmt.Errorf("failed to check active status: %w", err)
}
leaderCount--
}
@@ -2110,7 +1690,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
}
resp, err := core.HandleRequest(ctx, req)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to create root token with ID %q: {{err}}", coreConfig.DevToken), err)
+ return nil, fmt.Errorf("failed to create root token with ID %q: %w", coreConfig.DevToken, err)
}
if resp == nil {
return nil, fmt.Errorf("nil response when creating root token with ID %q", coreConfig.DevToken)
@@ -2126,7 +1706,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
req.Data = nil
resp, err = core.HandleRequest(ctx, req)
if err != nil {
- return nil, errwrap.Wrapf("failed to revoke initial root token: {{err}}", err)
+ return nil, fmt.Errorf("failed to revoke initial root token: %w", err)
}
}
@@ -2160,10 +1740,10 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
}
resp, err := core.HandleRequest(ctx, req)
if err != nil {
- return nil, errwrap.Wrapf("error creating default K/V store: {{err}}", err)
+ return nil, fmt.Errorf("error creating default K/V store: %w", err)
}
if resp.IsError() {
- return nil, errwrap.Wrapf("failed to create default K/V store: {{err}}", resp.Error())
+ return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error())
}
return init, nil
@@ -2478,7 +2058,7 @@ func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]rel
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(); err != nil {
- reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf("error encountered reloading listener: {{err}}", err))
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading listener: %w", err))
}
}
}
@@ -2487,7 +2067,7 @@ func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]rel
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(); err != nil {
- reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf(fmt.Sprintf("error encountered reloading file audit device at path %q: {{err}}", strings.TrimPrefix(k, "audit_file|")), err))
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading file audit device at path %q: %w", strings.TrimPrefix(k, "audit_file|"), err))
}
}
}
@@ -2514,7 +2094,7 @@ func (c *ServerCommand) storePidFile(pidPath string) error {
// Open the PID file
pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
- return errwrap.Wrapf("could not open pid file: {{err}}", err)
+ return fmt.Errorf("could not open pid file: %w", err)
}
defer pidFile.Close()
@@ -2522,7 +2102,7 @@ func (c *ServerCommand) storePidFile(pidPath string) error {
pid := os.Getpid()
_, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
if err != nil {
- return errwrap.Wrapf("could not write to pid file: {{err}}", err)
+ return fmt.Errorf("could not write to pid file: %w", err)
}
return nil
}
@@ -2591,6 +2171,493 @@ func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error)
return &status, nil
}
+// setSeal returns barrierSeal, barrierWrapper, unwrapSeal, and all the created seals from the configs so we can close them in Run
+// The two errors are the sealConfigError and the regular error
+func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string) (vault.Seal, wrapping.Wrapper, vault.Seal, []vault.Seal, error, error) {
+ var barrierSeal vault.Seal
+ var unwrapSeal vault.Seal
+
+ var sealConfigError error
+ var wrapper wrapping.Wrapper
+ var barrierWrapper wrapping.Wrapper
+ if c.flagDevAutoSeal {
+ barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil))
+ return barrierSeal, nil, nil, nil, nil, nil
+ }
+
+ // Handle the case where no seal is provided
+ switch len(config.Seals) {
+ case 0:
+ config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
+ case 1:
+ // If there's only one seal and it's disabled assume they want to
+ // migrate to a shamir seal and simply didn't provide it
+ if config.Seals[0].Disabled {
+ config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
+ }
+ }
+ var createdSeals []vault.Seal = make([]vault.Seal, len(config.Seals))
+ for _, configSeal := range config.Seals {
+ sealType := wrapping.Shamir
+ if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" {
+ sealType = os.Getenv("VAULT_SEAL_TYPE")
+ configSeal.Type = sealType
+ } else {
+ sealType = configSeal.Type
+ }
+
+ var seal vault.Seal
+ sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
+ c.allLoggers = append(c.allLoggers, sealLogger)
+ defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{
+ Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
+ Logger: c.logger.Named("shamir"),
+ }),
+ })
+ var sealInfoKeys []string
+ sealInfoMap := map[string]string{}
+ wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger)
+ if sealConfigError != nil {
+ if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
+ return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, fmt.Errorf(
+ "Error parsing Seal configuration: %s", sealConfigError)
+ }
+ }
+ if wrapper == nil {
+ seal = defaultSeal
+ } else {
+ seal = vault.NewAutoSeal(&vaultseal.Access{
+ Wrapper: wrapper,
+ })
+ }
+ infoPrefix := ""
+ if configSeal.Disabled {
+ unwrapSeal = seal
+ infoPrefix = "Old "
+ } else {
+ barrierSeal = seal
+ barrierWrapper = wrapper
+ }
+ for _, k := range sealInfoKeys {
+ infoKeys = append(infoKeys, infoPrefix+k)
+ info[infoPrefix+k] = sealInfoMap[k]
+ }
+ createdSeals = append(createdSeals, seal)
+ }
+ return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, nil
+}
+
+func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) {
+ // Initialize the separate HA storage backend, if it exists
+ var ok bool
+ if config.HAStorage != nil {
+ if config.Storage.Type == storageTypeRaft && config.HAStorage.Type == storageTypeRaft {
+ return false, fmt.Errorf("Raft cannot be set both as 'storage' and 'ha_storage'. Setting 'storage' to 'raft' will automatically set it up for HA operations as well")
+ }
+
+ if config.Storage.Type == storageTypeRaft {
+ return false, fmt.Errorf("HA storage cannot be declared when Raft is the storage type")
+ }
+
+ factory, exists := c.PhysicalBackends[config.HAStorage.Type]
+ if !exists {
+ return false, fmt.Errorf("Unknown HA storage type %s", config.HAStorage.Type)
+ }
+
+ namedHALogger := c.logger.Named("ha." + config.HAStorage.Type)
+ c.allLoggers = append(c.allLoggers, namedHALogger)
+ habackend, err := factory(config.HAStorage.Config, namedHALogger)
+ if err != nil {
+ return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err)
+ }
+
+ if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
+ return false, fmt.Errorf("Specified HA storage does not support HA")
+ }
+
+ if !coreConfig.HAPhysical.HAEnabled() {
+ return false, fmt.Errorf("Specified HA storage has HA support disabled; please consult documentation")
+ }
+
+ coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
+ disableClustering := config.HAStorage.DisableClustering
+
+ if config.HAStorage.Type == storageTypeRaft && disableClustering {
+ return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the HA storage type")
+ }
+
+ if !disableClustering {
+ coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
+ }
+ } else {
+ if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
+ coreConfig.RedirectAddr = config.Storage.RedirectAddr
+ disableClustering := config.Storage.DisableClustering
+
+ if (config.Storage.Type == storageTypeRaft) && disableClustering {
+ return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the storage type")
+ }
+
+ if !disableClustering {
+ coreConfig.ClusterAddr = config.Storage.ClusterAddr
+ }
+ }
+ }
+ return config.DisableClustering, nil
+}
+
+func determineRedirectAddr(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config) error {
+ var retErr error
+ if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" {
+ coreConfig.RedirectAddr = envRA
+ } else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
+ coreConfig.RedirectAddr = envRA
+ } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
+ coreConfig.RedirectAddr = envAA
+ }
+
+ // Attempt to detect the redirect address, if possible
+ if coreConfig.RedirectAddr == "" {
+ c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; falling back to detection if possible, but this value should be manually set")
+ }
+
+ var ok bool
+ var detect physical.RedirectDetect
+ if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
+ detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
+ } else {
+ detect, ok = coreConfig.Physical.(physical.RedirectDetect)
+ }
+ if ok && coreConfig.RedirectAddr == "" {
+ redirect, err := c.detectRedirect(detect, config)
+		// the following errors did not cause Run to return in the original code,
+		// so they are recorded in retErr rather than treated as fatal here.
+ if err != nil {
+ retErr = fmt.Errorf("Error detecting api address: %s", err)
+ } else if redirect == "" {
+ retErr = fmt.Errorf("Failed to detect api address")
+ } else {
+ coreConfig.RedirectAddr = redirect
+ }
+ }
+ if coreConfig.RedirectAddr == "" && c.flagDev {
+ coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Address)
+ }
+ return retErr
+}
+
+func findClusterAddress(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, disableClustering bool) error {
+ if disableClustering {
+ coreConfig.ClusterAddr = ""
+ } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
+ coreConfig.ClusterAddr = envCA
+ } else {
+ var addrToUse string
+ switch {
+ case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
+ addrToUse = coreConfig.RedirectAddr
+ case c.flagDev:
+ addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Address)
+ default:
+ goto CLUSTER_SYNTHESIS_COMPLETE
+ }
+ u, err := url.ParseRequestURI(addrToUse)
+ if err != nil {
+ return fmt.Errorf("Error parsing synthesized cluster address %s: %v", addrToUse, err)
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // This sucks, as it's a const in the function but not exported in the package
+ if strings.Contains(err.Error(), "missing port in address") {
+ host = u.Host
+ port = "443"
+ } else {
+ return fmt.Errorf("Error parsing api address: %v", err)
+ }
+ }
+ nPort, err := strconv.Atoi(port)
+ if err != nil {
+ return fmt.Errorf("Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err)
+ }
+ u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
+ // Will always be TLS-secured
+ u.Scheme = "https"
+ coreConfig.ClusterAddr = u.String()
+ }
+
+CLUSTER_SYNTHESIS_COMPLETE:
+
+ if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 {
+ return fmt.Errorf("Address %q used for both API and cluster addresses", coreConfig.RedirectAddr)
+ }
+
+ if coreConfig.ClusterAddr != "" {
+ // Force https as we'll always be TLS-secured
+ u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
+ if err != nil {
+ return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err)
+ }
+ u.Scheme = "https"
+ coreConfig.ClusterAddr = u.String()
+ }
+ return nil
+}
+
+func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) {
+ for {
+ err := core.UnsealWithStoredKeys(ctx)
+ if err == nil {
+ return
+ }
+
+ if vault.IsFatalError(err) {
+ c.logger.Error("error unsealing core", "error", err)
+ return
+ }
+ c.logger.Warn("failed to unseal core", "error", err)
+
+ select {
+ case <-c.ShutdownCh:
+ return
+ case <-time.After(5 * time.Second):
+ }
+ }
+}
+
+func createCoreConfig(c *ServerCommand, config *server.Config, backend physical.Backend, configSR sr.ServiceRegistration, barrierSeal, unwrapSeal vault.Seal,
+ metricsHelper *metricsutil.MetricsHelper, metricSink *metricsutil.ClusterMetricSink, secureRandomReader io.Reader) vault.CoreConfig {
+ coreConfig := &vault.CoreConfig{
+ RawConfig: config,
+ Physical: backend,
+ RedirectAddr: config.Storage.RedirectAddr,
+ StorageType: config.Storage.Type,
+ HAPhysical: nil,
+ ServiceRegistration: configSR,
+ Seal: barrierSeal,
+ UnwrapSeal: unwrapSeal,
+ AuditBackends: c.AuditBackends,
+ CredentialBackends: c.CredentialBackends,
+ LogicalBackends: c.LogicalBackends,
+ Logger: c.logger,
+ DisableSentinelTrace: config.DisableSentinelTrace,
+ DisableCache: config.DisableCache,
+ DisableMlock: config.DisableMlock,
+ MaxLeaseTTL: config.MaxLeaseTTL,
+ DefaultLeaseTTL: config.DefaultLeaseTTL,
+ ClusterName: config.ClusterName,
+ CacheSize: config.CacheSize,
+ PluginDirectory: config.PluginDirectory,
+ EnableUI: config.EnableUI,
+ EnableRaw: config.EnableRawEndpoint,
+ DisableSealWrap: config.DisableSealWrap,
+ DisablePerformanceStandby: config.DisablePerformanceStandby,
+ DisableIndexing: config.DisableIndexing,
+ AllLoggers: c.allLoggers,
+ BuiltinRegistry: builtinplugins.Registry,
+ DisableKeyEncodingChecks: config.DisablePrintableCheck,
+ MetricsHelper: metricsHelper,
+ MetricSink: metricSink,
+ SecureRandomReader: secureRandomReader,
+ EnableResponseHeaderHostname: config.EnableResponseHeaderHostname,
+ EnableResponseHeaderRaftNodeID: config.EnableResponseHeaderRaftNodeID,
+ License: config.License,
+ LicensePath: config.LicensePath,
+ }
+ if c.flagDev {
+ coreConfig.EnableRaw = true
+ coreConfig.DevToken = c.flagDevRootTokenID
+ if c.flagDevLeasedKV {
+ coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
+ }
+ if c.flagDevPluginDir != "" {
+ coreConfig.PluginDirectory = c.flagDevPluginDir
+ }
+ if c.flagDevLatency > 0 {
+ injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond
+ if _, txnOK := backend.(physical.Transactional); txnOK {
+ coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
+ } else {
+ coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
+ }
+ }
+ }
+ return *coreConfig
+}
+
+func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, configSR sr.ServiceRegistration) error {
+ if sd := coreConfig.GetServiceRegistration(); sd != nil {
+ if err := configSR.Run(c.ShutdownCh, c.WaitGroup, coreConfig.RedirectAddr); err != nil {
+ return fmt.Errorf("Error running service_registration of type %s: %s", config.ServiceRegistration.Type, err)
+ }
+ }
+ return nil
+}
+
+func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core) error {
+ if c.flagDev && !c.flagDevSkipInit {
+
+ init, err := c.enableDev(core, coreConfig)
+ if err != nil {
+ return fmt.Errorf("Error initializing Dev mode: %s", err)
+ }
+
+ var plugins, pluginsNotLoaded []string
+ if c.flagDevPluginDir != "" && c.flagDevPluginInit {
+
+ f, err := os.Open(c.flagDevPluginDir)
+ if err != nil {
+ return fmt.Errorf("Error reading plugin dir: %s", err)
+ }
+
+ list, err := f.Readdirnames(0)
+ f.Close()
+ if err != nil {
+ return fmt.Errorf("Error listing plugins: %s", err)
+ }
+
+ for _, name := range list {
+ path := filepath.Join(f.Name(), name)
+ if err := c.addPlugin(path, init.RootToken, core); err != nil {
+ if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) {
+ return fmt.Errorf("Error enabling plugin %s: %s", name, err)
+ }
+ pluginsNotLoaded = append(pluginsNotLoaded, name)
+ continue
+ }
+ plugins = append(plugins, name)
+ }
+
+ sort.Strings(plugins)
+ }
+
+ var qw *quiescenceSink
+ var qwo sync.Once
+ qw = &quiescenceSink{
+ t: time.AfterFunc(100*time.Millisecond, func() {
+ qwo.Do(func() {
+ c.logger.DeregisterSink(qw)
+
+ // Print the big dev mode warning!
+ c.UI.Warn(wrapAtLength(
+ "WARNING! dev mode is enabled! In this mode, Vault runs entirely " +
+ "in-memory and starts unsealed with a single unseal key. The root " +
+ "token is already authenticated to the CLI, so you can immediately " +
+ "begin using Vault."))
+ c.UI.Warn("")
+ c.UI.Warn("You may need to set the following environment variable:")
+ c.UI.Warn("")
+
+ endpointURL := "http://" + config.Listeners[0].Address
+ if runtime.GOOS == "windows" {
+ c.UI.Warn("PowerShell:")
+ c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL))
+ c.UI.Warn("cmd.exe:")
+ c.UI.Warn(fmt.Sprintf(" set VAULT_ADDR=%s", endpointURL))
+ } else {
+ c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL))
+ }
+
+ // Unseal key is not returned if stored shares is supported
+ if len(init.SecretShares) > 0 {
+ c.UI.Warn("")
+ c.UI.Warn(wrapAtLength(
+ "The unseal key and root token are displayed below in case you want " +
+ "to seal/unseal the Vault or re-authenticate."))
+ c.UI.Warn("")
+ c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0])))
+ }
+
+ if len(init.RecoveryShares) > 0 {
+ c.UI.Warn("")
+ c.UI.Warn(wrapAtLength(
+ "The recovery key and root token are displayed below in case you want " +
+ "to seal/unseal the Vault or re-authenticate."))
+ c.UI.Warn("")
+ c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0])))
+ }
+
+ c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken))
+
+ if len(plugins) > 0 {
+ c.UI.Warn("")
+ c.UI.Warn(wrapAtLength(
+ "The following dev plugins are registered in the catalog:"))
+ for _, p := range plugins {
+ c.UI.Warn(fmt.Sprintf(" - %s", p))
+ }
+ }
+
+ if len(pluginsNotLoaded) > 0 {
+ c.UI.Warn("")
+ c.UI.Warn(wrapAtLength(
+ "The following dev plugins FAILED to be registered in the catalog due to unknown type:"))
+ for _, p := range pluginsNotLoaded {
+ c.UI.Warn(fmt.Sprintf(" - %s", p))
+ }
+ }
+
+ c.UI.Warn("")
+ c.UI.Warn(wrapAtLength(
+ "Development mode should NOT be used in production installations!"))
+ c.UI.Warn("")
+ })
+ }),
+ }
+ c.logger.RegisterSink(qw)
+ }
+ return nil
+}
+
+// Initialize the HTTP servers
+func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, lns []listenerutil.Listener) error {
+ for _, ln := range lns {
+ if ln.Config == nil {
+ return fmt.Errorf("Found nil listener config after parsing")
+ }
+ handler := vaulthttp.Handler(&vault.HandlerProperties{
+ Core: core,
+ ListenerConfig: ln.Config,
+ DisablePrintableCheck: config.DisablePrintableCheck,
+ RecoveryMode: c.flagRecovery,
+ })
+
+ if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 {
+ handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config)
+ }
+
+ // server defaults
+ server := &http.Server{
+ Handler: handler,
+ ReadHeaderTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ IdleTimeout: 5 * time.Minute,
+ ErrorLog: c.logger.StandardLogger(nil),
+ }
+
+ // override server defaults with config values for read/write/idle timeouts if configured
+ if ln.Config.HTTPReadHeaderTimeout > 0 {
+ server.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout
+ }
+ if ln.Config.HTTPReadTimeout > 0 {
+ server.ReadTimeout = ln.Config.HTTPReadTimeout
+ }
+ if ln.Config.HTTPWriteTimeout > 0 {
+ server.WriteTimeout = ln.Config.HTTPWriteTimeout
+ }
+ if ln.Config.HTTPIdleTimeout > 0 {
+ server.IdleTimeout = ln.Config.HTTPIdleTimeout
+ }
+
+ // server config tests can exit now
+ if c.flagTestServerConfig {
+ continue
+ }
+
+ go server.Serve(ln.Listener)
+ }
+ return nil
+}
+
func SetStorageMigration(b physical.Backend, active bool) error {
if !active {
return b.Delete(context.Background(), storageMigrationLock)
diff --git a/command/server/config.go b/command/server/config.go
index 744f57c035a1a..2626d249b9772 100644
--- a/command/server/config.go
+++ b/command/server/config.go
@@ -4,7 +4,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "github.com/hashicorp/hcl/hcl/token"
"io"
"io/ioutil"
"os"
@@ -13,7 +12,6 @@ import (
"strings"
"time"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
@@ -23,7 +21,7 @@ import (
// Config is the configuration for the vault server.
type Config struct {
- UnusedKeys map[string][]token.Pos `hcl:",unusedKeyPositions"`
+ UnusedKeys configutil.UnusedKeyMap `hcl:",unusedKeyPositions"`
entConfig
*configutil.SharedConfig `hcl:"-"`
@@ -465,24 +463,24 @@ func ParseConfig(d, source string) (*Config, error) {
// Look for storage but still support old backend
if o := list.Filter("storage"); len(o.Items) > 0 {
if err := ParseStorage(result, o, "storage"); err != nil {
- return nil, errwrap.Wrapf("error parsing 'storage': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'storage': %w", err)
}
} else {
if o := list.Filter("backend"); len(o.Items) > 0 {
if err := ParseStorage(result, o, "backend"); err != nil {
- return nil, errwrap.Wrapf("error parsing 'backend': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'backend': %w", err)
}
}
}
if o := list.Filter("ha_storage"); len(o.Items) > 0 {
if err := parseHAStorage(result, o, "ha_storage"); err != nil {
- return nil, errwrap.Wrapf("error parsing 'ha_storage': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'ha_storage': %w", err)
}
} else {
if o := list.Filter("ha_backend"); len(o.Items) > 0 {
if err := parseHAStorage(result, o, "ha_backend"); err != nil {
- return nil, errwrap.Wrapf("error parsing 'ha_backend': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'ha_backend': %w", err)
}
}
}
@@ -490,13 +488,13 @@ func ParseConfig(d, source string) (*Config, error) {
// Parse service discovery
if o := list.Filter("service_registration"); len(o.Items) > 0 {
if err := parseServiceRegistration(result, o, "service_registration"); err != nil {
- return nil, errwrap.Wrapf("error parsing 'service_registration': {{err}}", err)
+ return nil, fmt.Errorf("error parsing 'service_registration': %w", err)
}
}
entConfig := &(result.entConfig)
if err := entConfig.parseConfig(list); err != nil {
- return nil, errwrap.Wrapf("error parsing enterprise config: {{err}}", err)
+ return nil, fmt.Errorf("error parsing enterprise config: %w", err)
}
// Remove all unused keys from Config that were satisfied by SharedConfig.
@@ -564,7 +562,7 @@ func LoadConfigDir(dir string) (*Config, error) {
for _, f := range files {
config, err := LoadConfigFile(f)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error loading %q: {{err}}", f), err)
+ return nil, fmt.Errorf("error loading %q: %w", f, err)
}
if result == nil {
diff --git a/command/server/listener.go b/command/server/listener.go
index eca313d72d6c8..248df52957d50 100644
--- a/command/server/listener.go
+++ b/command/server/listener.go
@@ -6,7 +6,6 @@ import (
"io"
"net"
- "github.com/hashicorp/errwrap"
// We must import sha512 so that it registers with the runtime so that
// certificates that use it can be parsed.
@@ -48,7 +47,7 @@ func listenerWrapProxy(ln net.Listener, l *configutil.Listener) (net.Listener, e
newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig)
if err != nil {
- return nil, errwrap.Wrapf("failed configuring PROXY protocol wrapper: {{err}}", err)
+ return nil, fmt.Errorf("failed configuring PROXY protocol wrapper: %w", err)
}
return newLn, nil
diff --git a/command/server/test-fixtures/config_bad_https_storage.hcl b/command/server/test-fixtures/config_bad_https_storage.hcl
index 27911d755d081..f8b5d7734be2e 100644
--- a/command/server/test-fixtures/config_bad_https_storage.hcl
+++ b/command/server/test-fixtures/config_bad_https_storage.hcl
@@ -20,7 +20,7 @@ backend "consul" {
ha_backend "consul" {
address = "127.0.0.1:8500"
bar = "baz"
- advertise_addr = "snafu"
+ advertise_addr = "https://127.0.0.1:8500"
disable_clustering = "true"
}
diff --git a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl
index c5187f5615326..deded2ddf1708 100644
--- a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl
+++ b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl
@@ -11,12 +11,12 @@ listener "tcp" {
backend "consul" {
foo = "bar"
advertise_addr = "foo"
- address = "127.0.0.1:1028"
-
+ address = "http://remoteconsulserverIP:1028"
}
ha_backend "consul" {
bar = "baz"
+ address = "https://remoteconsulserverIP:1028"
advertise_addr = "snafu"
disable_clustering = "true"
scheme = "https"
diff --git a/command/server/test-fixtures/config_diagnose_ok.hcl b/command/server/test-fixtures/config_diagnose_ok.hcl
index 9c1e76d975323..9287bc2540d1c 100644
--- a/command/server/test-fixtures/config_diagnose_ok.hcl
+++ b/command/server/test-fixtures/config_diagnose_ok.hcl
@@ -17,7 +17,7 @@ backend "consul" {
ha_backend "consul" {
address = "127.0.0.1:8500"
bar = "baz"
- advertise_addr = "snafu"
+ advertise_addr = "https://127.0.0.1:8500"
disable_clustering = "true"
}
diff --git a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl
index 4a71fb606508c..3b6a9abf290ab 100644
--- a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl
+++ b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl
@@ -17,7 +17,7 @@ backend "consul" {
ha_backend "consul" {
address = "127.0.0.1:1024"
bar = "baz"
- advertise_addr = "snafu"
+ advertise_addr = "https://127.0.0.1:8500"
disable_clustering = "true"
}
diff --git a/command/server/test-fixtures/tls_config_ok.hcl b/command/server/test-fixtures/tls_config_ok.hcl
index 44be00930ef8e..0dee4b4836039 100644
--- a/command/server/test-fixtures/tls_config_ok.hcl
+++ b/command/server/test-fixtures/tls_config_ok.hcl
@@ -17,7 +17,7 @@ backend "consul" {
ha_backend "consul" {
bar = "baz"
- advertise_addr = "snafu"
+ advertise_addr = "http://blah:8500"
disable_clustering = "true"
address = "127.0.0.1:8500"
}
diff --git a/command/server_test.go b/command/server_test.go
index 8d668310513b3..9fdb9ed2e4dd5 100644
--- a/command/server_test.go
+++ b/command/server_test.go
@@ -23,6 +23,12 @@ import (
"github.com/mitchellh/cli"
)
+func init() {
+ if signed := os.Getenv("VAULT_LICENSE_CI"); signed != "" {
+ os.Setenv("VAULT_LICENSE", signed)
+ }
+}
+
func testBaseHCL(tb testing.TB, listenerExtras string) string {
tb.Helper()
diff --git a/command/ssh.go b/command/ssh.go
index 7e01882630896..e5e5af373e7a0 100644
--- a/command/ssh.go
+++ b/command/ssh.go
@@ -11,7 +11,6 @@ import (
"strings"
"syscall"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/ssh"
"github.com/mitchellh/cli"
@@ -751,10 +750,10 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
}
secret, err := c.client.Logical().Write(mountPoint+"/lookup", data)
if err != nil {
- return "", errwrap.Wrapf(fmt.Sprintf("error finding roles for IP %q: {{err}}", ip), err)
+ return "", fmt.Errorf("error finding roles for IP %q: %w", ip, err)
}
if secret == nil || secret.Data == nil {
- return "", errwrap.Wrapf(fmt.Sprintf("error finding roles for IP %q: {{err}}", ip), err)
+ return "", fmt.Errorf("error finding roles for IP %q: %w", ip, err)
}
if secret.Data["roles"] == nil {
diff --git a/command/token/helper_external.go b/command/token/helper_external.go
index edd95d5e1a786..102bc1cff4058 100644
--- a/command/token/helper_external.go
+++ b/command/token/helper_external.go
@@ -8,8 +8,6 @@ import (
"path/filepath"
"runtime"
"strings"
-
- "github.com/hashicorp/errwrap"
)
// ExternalTokenHelperPath takes the configured path to a helper and expands it to
@@ -64,7 +62,7 @@ func (h *ExternalTokenHelper) Erase() error {
return err
}
if output, err := cmd.CombinedOutput(); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("%q: {{err}}", string(output)), err)
+ return fmt.Errorf("%q: %w", string(output), err)
}
return nil
}
@@ -79,7 +77,7 @@ func (h *ExternalTokenHelper) Get() (string, error) {
cmd.Stdout = &buf
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
- return "", errwrap.Wrapf(fmt.Sprintf("%q: {{err}}", stderr.String()), err)
+ return "", fmt.Errorf("%q: %w", stderr.String(), err)
}
return buf.String(), nil
@@ -94,7 +92,7 @@ func (h *ExternalTokenHelper) Store(v string) error {
}
cmd.Stdin = buf
if output, err := cmd.CombinedOutput(); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("%q: {{err}}", string(output)), err)
+ return fmt.Errorf("%q: %w", string(output), err)
}
return nil
diff --git a/go.mod b/go.mod
index 7c4f90ac60d81..fde83374b3b3c 100644
--- a/go.mod
+++ b/go.mod
@@ -51,6 +51,7 @@ require (
github.com/go-test/deep v1.0.7
github.com/gocql/gocql v0.0.0-20210401103645-80ab1e13e309
github.com/golang/protobuf v1.4.2
+ github.com/google/go-cmp v0.5.5
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-metrics-stackdriver v0.2.0
github.com/hashicorp/consul-template v0.25.2
@@ -60,7 +61,7 @@ require (
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0
github.com/hashicorp/go-gcp-common v0.6.0
- github.com/hashicorp/go-hclog v0.16.0
+ github.com/hashicorp/go-hclog v0.16.1
github.com/hashicorp/go-kms-wrapping v0.5.16
github.com/hashicorp/go-memdb v1.0.2
github.com/hashicorp/go-msgpack v1.1.5 // indirect
@@ -114,7 +115,7 @@ require (
github.com/kr/text v0.2.0
github.com/lib/pq v1.8.0
github.com/mattn/go-colorable v0.1.8
- github.com/mholt/archiver/v3 v3.5.0
+ github.com/mholt/archiver/v3 v3.5.1
github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5
github.com/miekg/dns v1.1.40 // indirect
github.com/mitchellh/cli v1.1.1
@@ -145,9 +146,10 @@ require (
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da
github.com/sasha-s/go-deadlock v0.2.0
github.com/sethvargo/go-limiter v0.3.0
- github.com/shirou/gopsutil v3.21.1+incompatible
+ github.com/shirou/gopsutil v3.21.5+incompatible
github.com/stretchr/testify v1.7.0
github.com/tidwall/pretty v1.0.1 // indirect
+ github.com/tklauser/go-sysconf v0.3.6 // indirect
github.com/xdg/stringprep v1.0.0 // indirect
github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da // indirect
go.etcd.io/bbolt v1.3.5
diff --git a/go.sum b/go.sum
index e357f0973f118..88f524822bd7d 100644
--- a/go.sum
+++ b/go.sum
@@ -153,8 +153,8 @@ github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
-github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
-github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc=
+github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ=
github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -313,8 +313,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
-github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
@@ -589,8 +589,8 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v0.16.0 h1:uCeOEwSWGMwhJUdpUjk+1cVKIEfGu2/1nFXukimi2MU=
-github.com/hashicorp/go-hclog v0.16.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
+github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
@@ -806,11 +806,11 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
-github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4 h1:kz40R/YWls3iqT9zX9AHN3WoVsrAWVyui5sxuLqiXqU=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
@@ -862,8 +862,8 @@ github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvO
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
-github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
+github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
+github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 h1:uA3b4GgZMZxAJsTkd+CVQ85b7KBlD7HLpd/FfTNlGN0=
github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -1015,8 +1015,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.0.3 h1:vNQKSVZNYUEAvRY9FaUXAF1XPbSOHJtDTiP41kzDz2E=
-github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM=
+github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1101,8 +1101,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sethvargo/go-limiter v0.3.0 h1:yRMc+Qs2yqw6YJp6UxrO2iUs6DOSq4zcnljbB7/rMns=
github.com/sethvargo/go-limiter v0.3.0/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU=
-github.com/shirou/gopsutil v3.21.1+incompatible h1:2LwXWdbjXwyDgq26Yy/OT4xozlpmssQfy/rtfhWb0bY=
-github.com/shirou/gopsutil v3.21.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc=
+github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -1173,6 +1173,10 @@ github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible/go.mod h1:0PfY
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8=
github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/Hmo4=
+github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
+github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
+github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -1180,9 +1184,9 @@ github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
+github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -1448,6 +1452,7 @@ golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
diff --git a/http/sys_health.go b/http/sys_health.go
index 145deaf53adb1..8ab7359e231fd 100644
--- a/http/sys_health.go
+++ b/http/sys_health.go
@@ -195,6 +195,21 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro
ClusterID: clusterID,
}
+ licenseState, err := vault.LicenseSummary(core)
+ if err != nil {
+ return http.StatusInternalServerError, nil, err
+ }
+
+ if licenseState != nil {
+ body.License = &HealthResponseLicense{
+ State: licenseState.State,
+ Terminated: licenseState.Terminated,
+ }
+ if !licenseState.ExpiryTime.IsZero() {
+ body.License.ExpiryTime = licenseState.ExpiryTime.Format(time.RFC3339)
+ }
+ }
+
if init && !sealed && !standby {
body.LastWAL = vault.LastWAL(core)
}
@@ -202,16 +217,23 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro
return code, body, nil
}
+type HealthResponseLicense struct {
+ State string `json:"state"`
+ ExpiryTime string `json:"expiry_time"`
+ Terminated bool `json:"terminated"`
+}
+
type HealthResponse struct {
- Initialized bool `json:"initialized"`
- Sealed bool `json:"sealed"`
- Standby bool `json:"standby"`
- PerformanceStandby bool `json:"performance_standby"`
- ReplicationPerformanceMode string `json:"replication_performance_mode"`
- ReplicationDRMode string `json:"replication_dr_mode"`
- ServerTimeUTC int64 `json:"server_time_utc"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
- LastWAL uint64 `json:"last_wal,omitempty"`
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ PerformanceStandby bool `json:"performance_standby"`
+ ReplicationPerformanceMode string `json:"replication_performance_mode"`
+ ReplicationDRMode string `json:"replication_dr_mode"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+ LastWAL uint64 `json:"last_wal,omitempty"`
+ License *HealthResponseLicense `json:"license,omitempty"`
}
diff --git a/http/sys_health_test.go b/http/sys_health_test.go
index 8cf373d94ab06..68ef11b9e2f57 100644
--- a/http/sys_health_test.go
+++ b/http/sys_health_test.go
@@ -44,6 +44,7 @@ func TestSysHealth_get(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@@ -77,6 +78,7 @@ func TestSysHealth_get(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@@ -114,6 +116,7 @@ func TestSysHealth_get(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@@ -157,6 +160,7 @@ func TestSysHealth_customcodes(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@@ -191,6 +195,7 @@ func TestSysHealth_customcodes(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@@ -228,6 +233,7 @@ func TestSysHealth_customcodes(t *testing.T) {
} else {
expected["cluster_id"] = actual["cluster_id"]
}
+ delete(actual, "license")
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go
index ca108af5ae1b6..fc527cc2af0c0 100644
--- a/internalshared/configutil/config_util.go
+++ b/internalshared/configutil/config_util.go
@@ -3,30 +3,11 @@
package configutil
import (
- "fmt"
- "github.com/asaskevich/govalidator"
"github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/hcl/hcl/token"
- "github.com/hashicorp/vault/sdk/helper/strutil"
)
type EntSharedConfig struct{}
-type UnusedKeyMap map[string][]token.Pos
-
-type ConfigError struct {
- Problem string
- Position token.Pos
-}
-
-func (c *ConfigError) String() string {
- return fmt.Sprintf("%s at %s", c.Problem, c.Position.String())
-}
-
-type ValidatableConfig interface {
- Validate() []ConfigError
-}
-
func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error {
return nil
}
@@ -34,41 +15,3 @@ func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error {
func ParseEntropy(result *SharedConfig, list *ast.ObjectList, blockName string) error {
return nil
}
-
-// Creates the ConfigErrors for unused fields, which occur in various structs
-func ValidateUnusedFields(unusedKeyPositions UnusedKeyMap, sourceFilePath string) []ConfigError {
- if unusedKeyPositions == nil {
- return nil
- }
- var errors []ConfigError
- for field, positions := range unusedKeyPositions {
- problem := fmt.Sprintf("unknown field %s found in configuration", field)
- for _, pos := range positions {
- if pos.Filename == "" && sourceFilePath != "" {
- pos.Filename = sourceFilePath
- }
- errors = append(errors, ConfigError{
- Problem: problem,
- Position: pos,
- })
- }
- }
- return errors
-}
-
-// UnusedFieldDifference returns all the keys in map a that are not present in map b, and also not present in foundKeys.
-func UnusedFieldDifference(a, b UnusedKeyMap, foundKeys []string) UnusedKeyMap {
- if a == nil {
- return nil
- }
- if b == nil {
- return a
- }
- res := make(UnusedKeyMap)
- for k, v := range a {
- if _, ok := b[k]; !ok && !strutil.StrListContainsCaseInsensitive(foundKeys, govalidator.UnderscoreToCamelCase(k)) {
- res[k] = v
- }
- }
- return res
-}
diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go
new file mode 100644
index 0000000000000..943c5287f8d38
--- /dev/null
+++ b/internalshared/configutil/lint.go
@@ -0,0 +1,61 @@
+package configutil
+
+import (
+ "fmt"
+ "github.com/asaskevich/govalidator"
+ "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/vault/sdk/helper/strutil"
+)
+
+type UnusedKeyMap map[string][]token.Pos
+
+type ConfigError struct {
+ Problem string
+ Position token.Pos
+}
+
+func (c *ConfigError) String() string {
+ return fmt.Sprintf("%s at %s", c.Problem, c.Position.String())
+}
+
+type ValidatableConfig interface {
+ Validate() []ConfigError
+}
+
+// Creates the ConfigErrors for unused fields, which occur in various structs
+func ValidateUnusedFields(unusedKeyPositions UnusedKeyMap, sourceFilePath string) []ConfigError {
+ if unusedKeyPositions == nil {
+ return nil
+ }
+ var errors []ConfigError
+ for field, positions := range unusedKeyPositions {
+ problem := fmt.Sprintf("unknown field %s found in configuration", field)
+ for _, pos := range positions {
+ if pos.Filename == "" && sourceFilePath != "" {
+ pos.Filename = sourceFilePath
+ }
+ errors = append(errors, ConfigError{
+ Problem: problem,
+ Position: pos,
+ })
+ }
+ }
+ return errors
+}
+
+// UnusedFieldDifference returns all the keys in map a that are not present in map b, and also not present in foundKeys.
+func UnusedFieldDifference(a, b UnusedKeyMap, foundKeys []string) UnusedKeyMap {
+ if a == nil {
+ return nil
+ }
+ if b == nil {
+ return a
+ }
+ res := make(UnusedKeyMap)
+ for k, v := range a {
+ if _, ok := b[k]; !ok && !strutil.StrListContainsCaseInsensitive(foundKeys, govalidator.UnderscoreToCamelCase(k)) {
+ res[k] = v
+ }
+ }
+ return res
+}
diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go
index 1845248d3ae0f..610cf6526fc60 100644
--- a/physical/aerospike/aerospike_test.go
+++ b/physical/aerospike/aerospike_test.go
@@ -43,7 +43,7 @@ func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) {
runner, err := docker.NewServiceRunner(docker.RunOptions{
ImageRepo: "aerospike/aerospike-server",
ContainerName: "aerospikedb",
- ImageTag: "latest",
+ ImageTag: "5.5.0.10",
Ports: []string{"3000/tcp", "3001/tcp", "3002/tcp", "3003/tcp"},
})
if err != nil {
diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go
index c58ece46b97fa..40f3da6d5643c 100644
--- a/physical/alicloudoss/alicloudoss.go
+++ b/physical/alicloudoss/alicloudoss.go
@@ -14,7 +14,6 @@ import (
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
)
@@ -92,7 +91,7 @@ func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical.
_, err = bucketObj.ListObjects()
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("unable to access bucket %q at endpoint %q: {{err}}", bucket, endpoint), err)
+ return nil, fmt.Errorf("unable to access bucket %q at endpoint %q: %w", bucket, endpoint, err)
}
maxParStr, ok := conf["max_parallel"]
@@ -100,7 +99,7 @@ func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical.
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
diff --git a/physical/azure/azure.go b/physical/azure/azure.go
index 4def98ce99949..9b8e92dff858d 100644
--- a/physical/azure/azure.go
+++ b/physical/azure/azure.go
@@ -16,7 +16,6 @@ import (
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/physical"
@@ -90,16 +89,12 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen
if environmentURL != "" {
environment, err = azure.EnvironmentFromURL(environmentURL)
if err != nil {
- errorMsg := fmt.Sprintf("failed to look up Azure environment descriptor for URL %q: {{err}}",
- environmentURL)
- return nil, errwrap.Wrapf(errorMsg, err)
+ return nil, fmt.Errorf("failed to look up Azure environment descriptor for URL %q: %w", environmentURL, err)
}
} else {
environment, err = azure.EnvironmentFromName(environmentName)
if err != nil {
- errorMsg := fmt.Sprintf("failed to look up Azure environment descriptor for name %q: {{err}}",
- environmentName)
- return nil, errwrap.Wrapf(errorMsg, err)
+ return nil, fmt.Errorf("failed to look up Azure environment descriptor for name %q: %w", environmentName, err)
}
}
@@ -107,9 +102,7 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen
if useMSI {
authToken, err := getAuthTokenFromIMDS(environment.ResourceIdentifiers.Storage)
if err != nil {
- errorMsg := fmt.Sprintf("failed to obtain auth token from IMDS %q: {{err}}",
- environmentName)
- return nil, errwrap.Wrapf(errorMsg, err)
+ return nil, fmt.Errorf("failed to obtain auth token from IMDS %q: %w", environmentName, err)
}
credential = azblob.NewTokenCredential(authToken.OAuthToken(), func(c azblob.TokenCredential) time.Duration {
@@ -134,14 +127,14 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen
} else {
credential, err = azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
- return nil, errwrap.Wrapf("failed to create Azure client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create Azure client: %w", err)
}
}
URL, err := url.Parse(
fmt.Sprintf("https://%s.blob.%s/%s", accountName, environment.StorageEndpointSuffix, name))
if err != nil {
- return nil, errwrap.Wrapf("failed to create Azure client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create Azure client: %w", err)
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
@@ -158,10 +151,10 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen
case azblob.ServiceCodeContainerNotFound:
_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to create %q container: {{err}}", name), err)
+ return nil, fmt.Errorf("failed to create %q container: %w", name, err)
}
default:
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to get properties for container %q: {{err}}", name), err)
+ return nil, fmt.Errorf("failed to get properties for container %q: %w", name, err)
}
}
}
@@ -171,7 +164,7 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -221,7 +214,7 @@ func (a *AzureBackend) Get(ctx context.Context, key string) (*physical.Entry, er
case azblob.ServiceCodeBlobNotFound:
return nil, nil
default:
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to download blob %q: {{err}}", key), err)
+ return nil, fmt.Errorf("failed to download blob %q: %w", key, err)
}
}
return nil, err
@@ -256,7 +249,7 @@ func (a *AzureBackend) Delete(ctx context.Context, key string) error {
case azblob.ServiceCodeBlobNotFound:
return nil
default:
- return errwrap.Wrapf(fmt.Sprintf("failed to delete blob %q: {{err}}", key), err)
+ return fmt.Errorf("failed to delete blob %q: %w", key, err)
}
}
}
diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go
index 93c5721ebdefa..f20b992055601 100644
--- a/physical/cassandra/cassandra.go
+++ b/physical/cassandra/cassandra.go
@@ -12,7 +12,6 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/gocql/gocql"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/hashicorp/vault/sdk/physical"
@@ -169,11 +168,11 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err
if pemBundlePath, ok := conf["pem_bundle_file"]; ok {
pemBundleData, err := ioutil.ReadFile(pemBundlePath)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error reading pem bundle from %q: {{err}}", pemBundlePath), err)
+ return fmt.Errorf("error reading pem bundle from %q: %w", pemBundlePath, err)
}
pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData))
if err != nil {
- return errwrap.Wrapf("error parsing 'pem_bundle': {{err}}", err)
+ return fmt.Errorf("error parsing 'pem_bundle': %w", err)
}
tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient)
if err != nil {
@@ -182,7 +181,7 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err
} else if pemJSONPath, ok := conf["pem_json_file"]; ok {
pemJSONData, err := ioutil.ReadFile(pemJSONPath)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error reading json bundle from %q: {{err}}", pemJSONPath), err)
+ return fmt.Errorf("error reading json bundle from %q: %w", pemJSONPath, err)
}
pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData))
if err != nil {
diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go
index 587146f2a59fd..ee91dbfb4033c 100644
--- a/physical/cockroachdb/cockroachdb.go
+++ b/physical/cockroachdb/cockroachdb.go
@@ -12,7 +12,6 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/cockroachdb/cockroach-go/crdb"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/sdk/helper/strutil"
@@ -59,7 +58,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.
err := validateDBTable(dbTable)
if err != nil {
- return nil, errwrap.Wrapf("invalid table: {{err}}", err)
+ return nil, fmt.Errorf("invalid table: %w", err)
}
maxParStr, ok := conf["max_parallel"]
@@ -67,7 +66,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -77,14 +76,14 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.
// Create CockroachDB handle for the database.
db, err := sql.Open("postgres", connURL)
if err != nil {
- return nil, errwrap.Wrapf("failed to connect to cockroachdb: {{err}}", err)
+ return nil, fmt.Errorf("failed to connect to cockroachdb: %w", err)
}
// Create the required table if it doesn't exists.
createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable +
" (path STRING, value BYTES, PRIMARY KEY (path))"
if _, err := db.Exec(createQuery); err != nil {
- return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mysql table: %w", err)
}
// Setup the backend
@@ -117,7 +116,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.
func (c *CockroachDBBackend) prepare(name, query string) error {
stmt, err := c.client.Prepare(query)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err)
+ return fmt.Errorf("failed to prepare %q: %w", name, err)
}
c.statements[name] = stmt
return nil
@@ -194,7 +193,7 @@ func (c *CockroachDBBackend) List(ctx context.Context, prefix string) ([]string,
var key string
err = rows.Scan(&key)
if err != nil {
- return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err)
+ return nil, fmt.Errorf("failed to scan rows: %w", err)
}
key = strings.TrimPrefix(key, prefix)
diff --git a/physical/consul/consul.go b/physical/consul/consul.go
index ae0703e4ad8f2..814a341117745 100644
--- a/physical/consul/consul.go
+++ b/physical/consul/consul.go
@@ -11,7 +11,6 @@ import (
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/api"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/sdk/helper/consts"
@@ -81,7 +80,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe
if ok {
_, err := parseutil.ParseDurationSecond(sessionTTLStr)
if err != nil {
- return nil, errwrap.Wrapf("invalid session_ttl: {{err}}", err)
+ return nil, fmt.Errorf("invalid session_ttl: %w", err)
}
sessionTTL = sessionTTLStr
if logger.IsDebug() {
@@ -94,7 +93,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe
if ok {
d, err := parseutil.ParseDurationSecond(lockWaitTimeRaw)
if err != nil {
- return nil, errwrap.Wrapf("invalid lock_wait_time: {{err}}", err)
+ return nil, fmt.Errorf("invalid lock_wait_time: %w", err)
}
lockWaitTime = d
if logger.IsDebug() {
@@ -107,7 +106,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe
if ok {
maxParInt, err := strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -135,7 +134,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe
consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
client, err := api.NewClient(consulConf)
if err != nil {
- return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
+ return nil, fmt.Errorf("client setup failed: %w", err)
}
// Setup the backend
@@ -249,7 +248,7 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt
ok, resp, _, err := c.kv.Txn(ops, queryOpts)
if err != nil {
if strings.Contains(err.Error(), "is too large") {
- return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err)
+ return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err)
}
return err
}
@@ -283,7 +282,7 @@ func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error {
_, err := c.kv.Put(pair, writeOpts)
if err != nil {
if strings.Contains(err.Error(), "Value exceeds") {
- return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err)
+ return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err)
}
return err
}
@@ -372,7 +371,7 @@ func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
}
lock, err := c.client.LockOpts(opts)
if err != nil {
- return nil, errwrap.Wrapf("failed to create lock: {{err}}", err)
+ return nil, fmt.Errorf("failed to create lock: %w", err)
}
cl := &ConsulLock{
client: c.client,
diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go
index f5bf6b77f0778..86fc139ed92d0 100644
--- a/physical/couchdb/couchdb.go
+++ b/physical/couchdb/couchdb.go
@@ -14,7 +14,6 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
@@ -179,7 +178,7 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go
index 3f79ef781c163..44e3ac0309f22 100644
--- a/physical/dynamodb/dynamodb.go
+++ b/physical/dynamodb/dynamodb.go
@@ -22,7 +22,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
- "github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/helper/awsutil"
@@ -213,7 +212,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac
awsSession, err := session.NewSession(awsConf)
if err != nil {
- return nil, errwrap.Wrapf("Could not establish AWS session: {{err}}", err)
+ return nil, fmt.Errorf("Could not establish AWS session: %w", err)
}
client := dynamodb.New(awsSession)
@@ -233,7 +232,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -260,7 +259,7 @@ func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error
}
item, err := dynamodbattribute.MarshalMap(record)
if err != nil {
- return errwrap.Wrapf("could not convert prefix record to DynamoDB item: {{err}}", err)
+ return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err)
}
requests := []*dynamodb.WriteRequest{{
PutRequest: &dynamodb.PutRequest{
@@ -275,7 +274,7 @@ func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error
}
item, err := dynamodbattribute.MarshalMap(record)
if err != nil {
- return errwrap.Wrapf("could not convert prefix record to DynamoDB item: {{err}}", err)
+ return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err)
}
requests = append(requests, &dynamodb.WriteRequest{
PutRequest: &dynamodb.PutRequest{
diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go
index c624a6988641b..ca573c44fd2c5 100644
--- a/physical/etcd/etcd.go
+++ b/physical/etcd/etcd.go
@@ -3,12 +3,12 @@ package etcd
import (
"context"
"errors"
+ "fmt"
"net/url"
"os"
"strings"
"github.com/coreos/go-semver/semver"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
"go.etcd.io/etcd/client"
@@ -137,7 +137,7 @@ func getEtcdEndpoints(conf map[string]string) ([]string, error) {
discoverer := client.NewSRVDiscover()
endpoints, err := discoverer.Discover(domain, srvName)
if err != nil {
- return nil, errwrap.Wrapf("failed to discover etcd endpoints through SRV discovery: {{err}}", err)
+ return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %w", err)
}
return endpoints, nil
}
diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go
index 942eaa5c93765..91350d072449c 100644
--- a/physical/etcd/etcd3.go
+++ b/physical/etcd/etcd3.go
@@ -12,7 +12,6 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/strutil"
@@ -115,7 +114,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen
// grpc converts this to uint32 internally, so parse as that to avoid passing invalid values
val, err := strconv.ParseUint(maxReceive, 10, 32)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("value of 'max_receive_size' (%v) could not be understood: {{err}}", maxReceive), err)
+ return nil, fmt.Errorf("value of 'max_receive_size' (%v) could not be understood: %w", maxReceive, err)
}
cfg.MaxCallRecvMsgSize = int(val)
}
@@ -133,7 +132,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen
}
reqTimeout, err := parseutil.ParseDurationSecond(sReqTimeout)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("value [%v] of 'request_timeout' could not be understood: {{err}}", sReqTimeout), err)
+ return nil, fmt.Errorf("value [%v] of 'request_timeout' could not be understood: %w", sReqTimeout, err)
}
ssync, ok := conf["sync"]
@@ -142,7 +141,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen
}
sync, err := strconv.ParseBool(ssync)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("value of 'sync' (%v) could not be understood: {{err}}", ssync), err)
+ return nil, fmt.Errorf("value of 'sync' (%v) could not be understood: %w", ssync, err)
}
if sync {
@@ -161,7 +160,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen
}
lock, err := parseutil.ParseDurationSecond(sLock)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("value [%v] of 'lock_timeout' could not be understood: {{err}}", sLock), err)
+ return nil, fmt.Errorf("value [%v] of 'lock_timeout' could not be understood: %w", sLock, err)
}
return &EtcdBackend{
diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go
index ad43a487d54db..442c0bed2cd4d 100644
--- a/physical/foundationdb/foundationdb.go
+++ b/physical/foundationdb/foundationdb.go
@@ -21,7 +21,6 @@ import (
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/physical"
)
@@ -165,7 +164,7 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend,
fdbApiVersionInt, err := strconv.Atoi(fdbApiVersionStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse fdb_api_version parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse fdb_api_version parameter: %w", err)
}
// Check requested FDB API version against minimum required API version
@@ -186,18 +185,18 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend,
if ok {
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse ha_enabled parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse ha_enabled parameter: %w", err)
}
}
instanceUUID, err := uuid.GenerateUUID()
if err != nil {
- return nil, errwrap.Wrapf("could not generate instance UUID: {{err}}", err)
+ return nil, fmt.Errorf("could not generate instance UUID: %w", err)
}
logger.Debug("Instance UUID", "uuid", instanceUUID)
if err := fdb.APIVersion(fdbApiVersionInt); err != nil {
- return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err)
+ return nil, fmt.Errorf("failed to set FDB API version: %w", err)
}
if tlsEnabled {
@@ -207,39 +206,39 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend,
if ok {
err := opts.SetTLSPassword(tlsPassword)
if err != nil {
- return nil, errwrap.Wrapf("failed to set TLS password: {{err}}", err)
+ return nil, fmt.Errorf("failed to set TLS password: %w", err)
}
}
err := opts.SetTLSCaPath(tlsCAFile)
if err != nil {
- return nil, errwrap.Wrapf("failed to set TLS CA bundle path: {{err}}", err)
+ return nil, fmt.Errorf("failed to set TLS CA bundle path: %w", err)
}
err = opts.SetTLSCertPath(tlsCertFile)
if err != nil {
- return nil, errwrap.Wrapf("failed to set TLS certificate path: {{err}}", err)
+ return nil, fmt.Errorf("failed to set TLS certificate path: %w", err)
}
err = opts.SetTLSKeyPath(tlsKeyFile)
if err != nil {
- return nil, errwrap.Wrapf("failed to set TLS key path: {{err}}", err)
+ return nil, fmt.Errorf("failed to set TLS key path: %w", err)
}
err = opts.SetTLSVerifyPeers([]byte(tlsVerifyPeers))
if err != nil {
- return nil, errwrap.Wrapf("failed to set TLS peer verification criteria: {{err}}", err)
+ return nil, fmt.Errorf("failed to set TLS peer verification criteria: %w", err)
}
}
db, err := fdb.Open(fdbClusterFile, []byte("DB"))
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to open database with cluster file '%s': {{err}}", fdbClusterFile), err)
+ return nil, fmt.Errorf("failed to open database with cluster file '%s': %w", fdbClusterFile, err)
}
topDir, err := directory.CreateOrOpen(db, dirPath, nil)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to create/open top-level directory '%s': {{err}}", path), err)
+ return nil, fmt.Errorf("failed to create/open top-level directory '%s': %w", path, err)
}
// Setup the backend
@@ -262,7 +261,7 @@ func (f *FDBBackend) incDirsRefcount(tr fdb.Transaction, path string) error {
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
- return errwrap.Wrapf("error incrementing directories refcount: {{err}}", err)
+ return fmt.Errorf("error incrementing directories refcount: %w", err)
}
// Atomic +1
@@ -287,7 +286,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error {
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
- return errwrap.Wrapf("error decrementing directories refcount: {{err}}", err)
+ return fmt.Errorf("error decrementing directories refcount: %w", err)
}
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...))
@@ -306,7 +305,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error {
for _, todo := range dirsTodo {
value, err := todo.future.Get()
if err != nil {
- return errwrap.Wrapf("error getting directory refcount while decrementing: {{err}}", err)
+ return fmt.Errorf("error getting directory refcount while decrementing: %w", err)
}
// The directory entry does not exist; this is not expected
@@ -317,7 +316,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error {
var count int64
err = binary.Read(bytes.NewReader(value), binary.LittleEndian, &count)
if err != nil {
- return errwrap.Wrapf("error reading directory refcount while decrementing: {{err}}", err)
+ return fmt.Errorf("error reading directory refcount while decrementing: %w", err)
}
if count > 1 {
@@ -346,7 +345,7 @@ func (f *FDBBackend) internalPut(tr fdb.Transaction, decoratedPath []byte, path
value, err := metaFuture.Get()
if err != nil {
- return errwrap.Wrapf("Put error while getting meta key: {{err}}", err)
+ return fmt.Errorf("Put error while getting meta key: %w", err)
}
if value == nil {
@@ -366,7 +365,7 @@ func (f *FDBBackend) internalClear(tr fdb.Transaction, decoratedPath []byte, pat
value, err := tr.Get(metaFKey).Get()
if err != nil {
- return errwrap.Wrapf("Delete error while getting meta key: {{err}}", err)
+ return fmt.Errorf("Delete error while getting meta key: %w", err)
}
if value != nil {
@@ -399,7 +398,7 @@ func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry)
decoratedPath, err := decoratePath(op.Entry.Key)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("could not build decorated path for transaction item %s: {{err}}", op.Entry.Key), err)
+ return fmt.Errorf("could not build decorated path for transaction item %s: %w", op.Entry.Key, err)
}
todo[i] = &TxnTodo{
@@ -419,14 +418,14 @@ func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry)
}
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("operation %s failed for transaction item %s: {{err}}", txnTodo.op.Operation, txnTodo.op.Entry.Key), err)
+ return nil, fmt.Errorf("operation %s failed for transaction item %s: %w", txnTodo.op.Operation, txnTodo.op.Entry.Key, err)
}
}
return nil, nil
})
if err != nil {
- return errwrap.Wrapf("transaction failed: {{err}}", err)
+ return fmt.Errorf("transaction failed: %w", err)
}
return nil
@@ -438,7 +437,7 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
decoratedPath, err := decoratePath(entry.Key)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("could not build decorated path to put item %s: {{err}}", entry.Key), err)
+ return fmt.Errorf("could not build decorated path to put item %s: %w", entry.Key, err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
@@ -451,7 +450,7 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
})
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("put failed for item %s: {{err}}", entry.Key), err)
+ return fmt.Errorf("put failed for item %s: %w", entry.Key, err)
}
return nil
@@ -464,7 +463,7 @@ func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, erro
decoratedPath, err := decoratePath(key)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to get item %s: {{err}}", key), err)
+ return nil, fmt.Errorf("could not build decorated path to get item %s: %w", key, err)
}
fkey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
@@ -478,7 +477,7 @@ func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, erro
return value, nil
})
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("get failed for item %s: {{err}}", key), err)
+ return nil, fmt.Errorf("get failed for item %s: %w", key, err)
}
if value.([]byte) == nil {
return nil, nil
@@ -496,7 +495,7 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error {
decoratedPath, err := decoratePath(key)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("could not build decorated path to delete item %s: {{err}}", key), err)
+ return fmt.Errorf("could not build decorated path to delete item %s: %w", key, err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
@@ -509,7 +508,7 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error {
})
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("delete failed for item %s: {{err}}", key), err)
+ return fmt.Errorf("delete failed for item %s: %w", key, err)
}
return nil
@@ -525,7 +524,7 @@ func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error)
decoratedPrefix, err := decoratePrefix(prefix)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to list prefix %s: {{err}}", prefix), err)
+ return nil, fmt.Errorf("could not build decorated path to list prefix %s: %w", prefix, err)
}
// The beginning of the range is /\x02foo/\x02bar/\x01 (the decorated prefix) to list foo/bar/
@@ -551,7 +550,7 @@ func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error)
return dirList, nil
})
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("could not list prefix %s: {{err}}", prefix), err)
+ return nil, fmt.Errorf("could not list prefix %s: %w", prefix, err)
}
return content.([]string), nil
@@ -635,7 +634,7 @@ func (fl *FDBBackendLock) getLockContent(tr fdb.Transaction) (*FDBBackendLockCon
content, err := unpackLock(tupleContent)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err)
+ return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err)
}
return content, nil
@@ -657,14 +656,14 @@ func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan err
wonTheRace, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tupleContent, err := tr.Get(fl.fkey).Get()
if err != nil {
- return nil, errwrap.Wrapf("could not read lock: {{err}}", err)
+ return nil, fmt.Errorf("could not read lock: %w", err)
}
// Lock exists
if tupleContent != nil {
content, err := unpackLock(tupleContent)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err)
+ return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err)
}
if fl.isOwned(content) {
@@ -842,7 +841,7 @@ func (fl *FDBBackendLock) Unlock() error {
_, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
- return nil, errwrap.Wrapf("could not get lock content: {{err}}", err)
+ return nil, fmt.Errorf("could not get lock content: %w", err)
}
// We don't own the lock
@@ -855,7 +854,7 @@ func (fl *FDBBackendLock) Unlock() error {
return nil, nil
})
if err != nil {
- return errwrap.Wrapf("unlock failed: {{err}}", err)
+ return fmt.Errorf("unlock failed: %w", err)
}
return nil
@@ -865,13 +864,13 @@ func (fl *FDBBackendLock) Value() (bool, string, error) {
tupleContent, err := fl.f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
tupleContent, err := rtr.Get(fl.fkey).Get()
if err != nil {
- return nil, errwrap.Wrapf("could not read lock: {{err}}", err)
+ return nil, fmt.Errorf("could not read lock: %w", err)
}
return tupleContent, nil
})
if err != nil {
- return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed for lock %s: {{err}}", fl.key), err)
+ return false, "", fmt.Errorf("get lock value failed for lock %s: %w", fl.key, err)
}
if tupleContent.([]byte) == nil {
return false, "", nil
@@ -879,7 +878,7 @@ func (fl *FDBBackendLock) Value() (bool, string, error) {
content, err := unpackLock(tupleContent.([]byte))
if err != nil {
- return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed to unpack lock %s: {{err}}", fl.key), err)
+ return false, "", fmt.Errorf("get lock value failed to unpack lock %s: %w", fl.key, err)
}
return true, content.value, nil
diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go
index 97cdb50d4a753..9c08b812bcedf 100644
--- a/physical/foundationdb/foundationdb_test.go
+++ b/physical/foundationdb/foundationdb_test.go
@@ -10,7 +10,6 @@ import (
"testing"
"time"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
@@ -25,12 +24,12 @@ import (
func connectToFoundationDB(clusterFile string) (*fdb.Database, error) {
if err := fdb.APIVersion(520); err != nil {
- return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err)
+ return nil, fmt.Errorf("failed to set FDB API version: %w", err)
}
db, err := fdb.Open(clusterFile, []byte("DB"))
if err != nil {
- return nil, errwrap.Wrapf("failed to open database: {{err}}", err)
+ return nil, fmt.Errorf("failed to open database: %w", err)
}
return &db, nil
@@ -39,11 +38,11 @@ func connectToFoundationDB(clusterFile string) (*fdb.Database, error) {
func cleanupTopDir(clusterFile, topDir string) error {
db, err := connectToFoundationDB(clusterFile)
if err != nil {
- return errwrap.Wrapf("could not connect to FDB for cleanup: {{err}}", err)
+ return fmt.Errorf("could not connect to FDB for cleanup: %w", err)
}
if _, err := directory.Root().Remove(db, []string{topDir}); err != nil {
- return errwrap.Wrapf("could not remove directory: {{err}}", err)
+ return fmt.Errorf("could not remove directory: %w", err)
}
return nil
@@ -170,16 +169,16 @@ func prepareFoundationDBTestDirectory(t *testing.T, topDir string) (func(), stri
connectString := fmt.Sprintf("foundationdb:foundationdb@127.0.0.1:%s", resource.GetPort("4500/tcp"))
if err := tmpFile.Truncate(0); err != nil {
- return errwrap.Wrapf("could not truncate cluster file: {{err}}", err)
+ return fmt.Errorf("could not truncate cluster file: %w", err)
}
_, err := tmpFile.WriteAt([]byte(connectString), 0)
if err != nil {
- return errwrap.Wrapf("could not write cluster file: {{err}}", err)
+ return fmt.Errorf("could not write cluster file: %w", err)
}
if _, err := connectToFoundationDB(clusterFile); err != nil {
- return errwrap.Wrapf("could not connect to FoundationDB after starting container: %s", err)
+ return fmt.Errorf("could not connect to FoundationDB after starting container: %s", err)
}
return nil
diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go
index 2e63b37185a8e..f38ffa53d3fa7 100644
--- a/physical/gcs/gcs.go
+++ b/physical/gcs/gcs.go
@@ -13,7 +13,6 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/sdk/helper/useragent"
@@ -116,7 +115,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
}
chunkSize, err := strconv.Atoi(chunkSizeStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse chunk_size: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse chunk_size: %w", err)
}
// Values are specified as kb, but the API expects them as bytes.
@@ -133,7 +132,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
var err error
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
}
}
if haEnabled {
@@ -142,14 +141,14 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
ctx := context.Background()
haClient, err = storage.NewClient(ctx, option.WithUserAgent(useragent.String()))
if err != nil {
- return nil, errwrap.Wrapf("failed to create HA storage client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create HA storage client: %w", err)
}
}
// Max parallel
maxParallel, err := extractInt(c["max_parallel"])
if err != nil {
- return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse max_parallel: %w", err)
}
logger.Debug("configuration",
@@ -163,7 +162,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
ctx := context.Background()
client, err := storage.NewClient(ctx, option.WithUserAgent(useragent.String()))
if err != nil {
- return nil, errwrap.Wrapf("failed to create storage client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create storage client: %w", err)
}
return &Backend{
@@ -195,12 +194,12 @@ func (b *Backend) Put(ctx context.Context, entry *physical.Entry) (retErr error)
defer func() {
closeErr := w.Close()
if closeErr != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("error closing connection: {{err}}", closeErr))
+ retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr))
}
}()
if _, err := w.Write(entry.Value); err != nil {
- return errwrap.Wrapf("failed to put data: {{err}}", err)
+ return fmt.Errorf("failed to put data: %w", err)
}
return nil
}
@@ -219,19 +218,19 @@ func (b *Backend) Get(ctx context.Context, key string) (retEntry *physical.Entry
return nil, nil
}
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", key), err)
+ return nil, fmt.Errorf("failed to read value for %q: %w", key, err)
}
defer func() {
closeErr := r.Close()
if closeErr != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("error closing connection: {{err}}", closeErr))
+ retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr))
}
}()
value, err := ioutil.ReadAll(r)
if err != nil {
- return nil, errwrap.Wrapf("failed to read value into a string: {{err}}", err)
+ return nil, fmt.Errorf("failed to read value into a string: %w", err)
}
return &physical.Entry{
@@ -251,7 +250,7 @@ func (b *Backend) Delete(ctx context.Context, key string) error {
// Delete
err := b.client.Bucket(b.bucket).Object(key).Delete(ctx)
if err != nil && err != storage.ErrObjectNotExist {
- return errwrap.Wrapf(fmt.Sprintf("failed to delete key %q: {{err}}", key), err)
+ return fmt.Errorf("failed to delete key %q: %w", key, err)
}
return nil
}
@@ -279,7 +278,7 @@ func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) {
break
}
if err != nil {
- return nil, errwrap.Wrapf("failed to read object: {{err}}", err)
+ return nil, fmt.Errorf("failed to read object: %w", err)
}
var path string
diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go
index 39f30f7e0bd8e..7ad57a0f48c3d 100644
--- a/physical/gcs/gcs_ha.go
+++ b/physical/gcs/gcs_ha.go
@@ -9,7 +9,6 @@ import (
"cloud.google.com/go/storage"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/pkg/errors"
@@ -109,7 +108,7 @@ func (b *Backend) HAEnabled() bool {
func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
identity, err := uuid.GenerateUUID()
if err != nil {
- return nil, errwrap.Wrapf("lock with: {{err}}", err)
+ return nil, fmt.Errorf("lock with: %w", err)
}
return &Lock{
backend: b,
@@ -142,7 +141,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// occurs.
acquired, err := l.attemptLock(stopCh)
if err != nil {
- return nil, errwrap.Wrapf("lock: {{err}}", err)
+ return nil, fmt.Errorf("lock: %w", err)
}
if !acquired {
return nil, nil
@@ -187,7 +186,7 @@ func (l *Lock) Unlock() error {
ctx := context.Background()
r, err := l.get(ctx)
if err != nil {
- return errwrap.Wrapf("failed to read lock for deletion: {{err}}", err)
+ return fmt.Errorf("failed to read lock for deletion: %w", err)
}
if r != nil && r.Identity == l.identity {
ctx := context.Background()
@@ -203,7 +202,7 @@ func (l *Lock) Unlock() error {
if terr, ok := err.(*googleapi.Error); ok && terr.Code == 412 {
l.backend.logger.Debug("unlock: preconditions failed (lock already taken by someone else?)")
} else {
- return errwrap.Wrapf("failed to delete lock: {{err}}", err)
+ return fmt.Errorf("failed to delete lock: %w", err)
}
}
}
@@ -240,7 +239,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
case <-ticker.C:
acquired, err := l.writeLock()
if err != nil {
- return false, errwrap.Wrapf("attempt lock: {{err}}", err)
+ return false, fmt.Errorf("attempt lock: %w", err)
}
if !acquired {
continue
@@ -345,7 +344,7 @@ func (l *Lock) writeLock() (bool, error) {
// Read the record
r, err := l.get(ctx)
if err != nil {
- return false, errwrap.Wrapf("write lock: {{err}}", err)
+ return false, fmt.Errorf("write lock: %w", err)
}
if r != nil {
// If the key is empty or the identity is ours or the ttl expired, we can
@@ -370,7 +369,7 @@ func (l *Lock) writeLock() (bool, error) {
Timestamp: time.Now().UTC(),
})
if err != nil {
- return false, errwrap.Wrapf("write lock: failed to encode JSON: {{err}}", err)
+ return false, fmt.Errorf("write lock: failed to encode JSON: %w", err)
}
// Write the object
@@ -399,7 +398,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) {
return nil, nil
}
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read attrs for %q: {{err}}", l.key), err)
+ return nil, fmt.Errorf("failed to read attrs for %q: %w", l.key, err)
}
// If we got this far, we have attributes, meaning the lockfile exists.
@@ -407,7 +406,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) {
r.attrs = attrs
lockData := []byte(attrs.Metadata["lock"])
if err := json.Unmarshal(lockData, &r); err != nil {
- return nil, errwrap.Wrapf("failed to decode lock: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode lock: %w", err)
}
return &r, nil
}
diff --git a/physical/manta/manta.go b/physical/manta/manta.go
index a02bf3d479f07..390683d3695fc 100644
--- a/physical/manta/manta.go
+++ b/physical/manta/manta.go
@@ -13,7 +13,6 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
triton "github.com/joyent/triton-go"
@@ -63,7 +62,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen
}
signer, err := authentication.NewSSHAgentSigner(input)
if err != nil {
- return nil, errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err)
+ return nil, fmt.Errorf("Error Creating SSH Agent Signer: %w", err)
}
maxParStr, ok := conf["max_parallel"]
@@ -71,7 +70,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -86,7 +85,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen
client, err := storage.NewClient(config)
if err != nil {
- return nil, errwrap.Wrapf("failed initialising Storage client: {{err}}", err)
+ return nil, fmt.Errorf("failed initialising Storage client: %w", err)
}
return &MantaBackend{
diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go
index f6e390623002a..fa145bfe1f07f 100644
--- a/physical/mssql/mssql.go
+++ b/physical/mssql/mssql.go
@@ -11,7 +11,6 @@ import (
metrics "github.com/armon/go-metrics"
_ "github.com/denisenkom/go-mssqldb"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/physical"
@@ -55,7 +54,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -109,13 +108,13 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
db, err := sql.Open("mssql", connectionString)
if err != nil {
- return nil, errwrap.Wrapf("failed to connect to mssql: {{err}}", err)
+ return nil, fmt.Errorf("failed to connect to mssql: %w", err)
}
db.SetMaxOpenConns(maxParInt)
if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil {
- return nil, errwrap.Wrapf("failed to create mssql database: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mssql database: %w", err)
}
dbTable := database + "." + schema + "." + table
@@ -130,16 +129,16 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
switch {
case err == sql.ErrNoRows:
if _, err := db.Exec("USE " + database + "; EXEC ('CREATE SCHEMA " + schema + "')"); err != nil {
- return nil, errwrap.Wrapf("failed to create mssql schema: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mssql schema: %w", err)
}
case err != nil:
- return nil, errwrap.Wrapf("failed to check if mssql schema exists: {{err}}", err)
+ return nil, fmt.Errorf("failed to check if mssql schema exists: %w", err)
}
}
if _, err := db.Exec(createQuery); err != nil {
- return nil, errwrap.Wrapf("failed to create mssql table: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mssql table: %w", err)
}
m := &MSSQLBackend{
@@ -170,7 +169,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
func (m *MSSQLBackend) prepare(name, query string) error {
stmt, err := m.client.Prepare(query)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err)
+ return fmt.Errorf("failed to prepare %q: %w", name, err)
}
m.statements[name] = stmt
@@ -246,7 +245,7 @@ func (m *MSSQLBackend) List(ctx context.Context, prefix string) ([]string, error
var key string
err = rows.Scan(&key)
if err != nil {
- return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err)
+ return nil, fmt.Errorf("failed to scan rows: %w", err)
}
key = strings.TrimPrefix(key, prefix)
diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go
index 41393977b022e..3f7577011fc45 100644
--- a/physical/mysql/mysql.go
+++ b/physical/mysql/mysql.go
@@ -22,7 +22,6 @@ import (
metrics "github.com/armon/go-metrics"
mysql "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/physical"
)
@@ -84,7 +83,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -97,7 +96,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
var schemaExist bool
schemaRows, err := db.Query("SELECT SCHEMA_NAME FROM information_schema.SCHEMATA WHERE SCHEMA_NAME = ?", database)
if err != nil {
- return nil, errwrap.Wrapf("failed to check mysql schema exist: {{err}}", err)
+ return nil, fmt.Errorf("failed to check mysql schema exist: %w", err)
}
defer schemaRows.Close()
schemaExist = schemaRows.Next()
@@ -106,7 +105,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
var tableExist bool
tableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", table, database)
if err != nil {
- return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err)
+ return nil, fmt.Errorf("failed to check mysql table exist: %w", err)
}
defer tableRows.Close()
tableExist = tableRows.Next()
@@ -114,7 +113,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
// Create the required database if it doesn't exists.
if !schemaExist {
if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS `" + database + "`"); err != nil {
- return nil, errwrap.Wrapf("failed to create mysql database: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mysql database: %w", err)
}
}
@@ -123,7 +122,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
create_query := "CREATE TABLE IF NOT EXISTS " + dbTable +
" (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))"
if _, err := db.Exec(create_query); err != nil {
- return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mysql table: %w", err)
}
}
@@ -150,7 +149,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
var lockTableExist bool
lockTableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", locktable, database)
if err != nil {
- return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err)
+ return nil, fmt.Errorf("failed to check mysql table exist: %w", err)
}
defer lockTableRows.Close()
lockTableExist = lockTableRows.Next()
@@ -160,7 +159,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen
create_query := "CREATE TABLE IF NOT EXISTS " + dbLockTable +
" (node_job varbinary(512), current_leader varbinary(512), PRIMARY KEY (node_job))"
if _, err := db.Exec(create_query); err != nil {
- return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err)
+ return nil, fmt.Errorf("failed to create mysql table: %w", err)
}
}
}
@@ -286,7 +285,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
if ok {
maxIdleConnInt, err = strconv.Atoi(maxIdleConnStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnInt)
@@ -298,7 +297,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
if ok {
maxConnLifeInt, err = strconv.Atoi(maxConnLifeStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_connection_lifetime parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_connection_lifetime parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_connection_lifetime set", "max_connection_lifetime", maxConnLifeInt)
@@ -310,7 +309,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -323,7 +322,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
tlsCaFile, tlsOk := conf["tls_ca_file"]
if tlsOk {
if err := setupMySQLTLSConfig(tlsCaFile); err != nil {
- return nil, errwrap.Wrapf("failed register TLS config: {{err}}", err)
+ return nil, fmt.Errorf("failed register TLS config: %w", err)
}
dsnParams.Add("tls", mysqlTLSKey)
@@ -337,7 +336,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
dsn := username + ":" + password + "@tcp(" + address + ")/?" + dsnParams.Encode()
db, err := sql.Open("mysql", dsn)
if err != nil {
- return nil, errwrap.Wrapf("failed to connect to mysql: {{err}}", err)
+ return nil, fmt.Errorf("failed to connect to mysql: %w", err)
}
db.SetMaxOpenConns(maxParInt)
if maxIdleConnInt != 0 {
@@ -354,7 +353,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error)
func (m *MySQLBackend) prepare(name, query string) error {
stmt, err := m.client.Prepare(query)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err)
+ return fmt.Errorf("failed to prepare %q: %w", name, err)
}
m.statements[name] = stmt
return nil
@@ -423,7 +422,7 @@ func (m *MySQLBackend) List(ctx context.Context, prefix string) ([]string, error
likePrefix := prefix + "%"
rows, err := m.statements["list"].Query(likePrefix)
if err != nil {
- return nil, errwrap.Wrapf("failed to execute statement: {{err}}", err)
+ return nil, fmt.Errorf("failed to execute statement: %w", err)
}
var keys []string
@@ -431,7 +430,7 @@ func (m *MySQLBackend) List(ctx context.Context, prefix string) ([]string, error
var key string
err = rows.Scan(&key)
if err != nil {
- return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err)
+ return nil, fmt.Errorf("failed to scan rows: %w", err)
}
key = strings.TrimPrefix(key, prefix)
@@ -672,7 +671,7 @@ func NewMySQLLock(in *MySQLBackend, l log.Logger, key, value string) (*MySQLLock
func (m *MySQLLock) prepare(name, query string) error {
stmt, err := m.in.Prepare(query)
if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err)
+ return fmt.Errorf("failed to prepare %q: %w", name, err)
}
m.statements[name] = stmt
return nil
diff --git a/physical/oci/oci.go b/physical/oci/oci.go
index 3e8137675462f..cd80f6e9ab14f 100644
--- a/physical/oci/oci.go
+++ b/physical/oci/oci.go
@@ -13,7 +13,6 @@ import (
"time"
"github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/helper/strutil"
@@ -89,7 +88,7 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er
if haEnabledStr != "" {
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
}
if haEnabled {
@@ -105,7 +104,7 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er
if authTypeAPIKeyStr != "" {
authTypeAPIKeyBool, err = strconv.ParseBool(authTypeAPIKeyStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing auth_type_api_key parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing auth_type_api_key parameter: %w", err)
}
}
@@ -115,13 +114,13 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er
} else {
cp, err = auth.InstancePrincipalConfigurationProvider()
if err != nil {
- return nil, errwrap.Wrapf("failed creating InstancePrincipalConfigurationProvider: {{err}}", err)
+ return nil, fmt.Errorf("failed creating InstancePrincipalConfigurationProvider: %w", err)
}
}
objectStorageClient, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(cp)
if err != nil {
- return nil, errwrap.Wrapf("failed creating NewObjectStorageClientWithConfigurationProvider: {{err}}", err)
+ return nil, fmt.Errorf("failed creating NewObjectStorageClientWithConfigurationProvider: %w", err)
}
region := conf["region"]
@@ -164,7 +163,7 @@ func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error {
if err != nil {
metrics.IncrCounter(metricPutFailed, 1)
o.logger.Error("failed to generate UUID")
- return errwrap.Wrapf("failed to generate UUID: {{err}}", err)
+ return fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("PUT", "opc-client-request-id", opcClientRequestId)
@@ -185,7 +184,7 @@ func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error {
if err != nil {
metrics.IncrCounter(metricPutFailed, 1)
- return errwrap.Wrapf("failed to put data: {{err}}", err)
+ return fmt.Errorf("failed to put data: %w", err)
}
o.logRequest("PUT", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
@@ -207,7 +206,7 @@ func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error)
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("failed to generate UUID")
- return nil, errwrap.Wrapf("failed to generate UUID: {{err}}", err)
+ return nil, fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("GET", "opc-client-request-id", opcClientRequestId)
request := objectstorage.GetObjectRequest{
@@ -228,13 +227,13 @@ func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error)
return nil, nil
}
metrics.IncrCounter(metricGetFailed, 1)
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read Value: {{err}}"), err)
+ return nil, fmt.Errorf("failed to read Value: %w", err)
}
body, err := ioutil.ReadAll(resp.Content)
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
- return nil, errwrap.Wrapf("failed to decode Value into bytes: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode Value into bytes: %w", err)
}
o.logger.Debug("GET completed")
@@ -258,7 +257,7 @@ func (o *Backend) Delete(ctx context.Context, key string) error {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("Delete: error generating UUID")
- return errwrap.Wrapf("failed to generate UUID: {{err}}", err)
+ return fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("Delete", "opc-client-request-id", opcClientRequestId)
request := objectstorage.DeleteObjectRequest{
@@ -280,7 +279,7 @@ func (o *Backend) Delete(ctx context.Context, key string) error {
return nil
}
metrics.IncrCounter(metricDeleteFailed, 1)
- return errwrap.Wrapf("failed to delete Key: {{err}}", err)
+ return fmt.Errorf("failed to delete Key: %w", err)
}
o.logger.Debug("DELETE completed")
@@ -305,7 +304,7 @@ func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("List: error generating UUID")
- return nil, errwrap.Wrapf("failed to generate UUID {{err}}", err)
+			return nil, fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("LIST", "opc-client-request-id", opcClientRequestId)
request := objectstorage.ListObjectsRequest{
@@ -322,7 +321,7 @@ func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) {
if err != nil {
metrics.IncrCounter(metricListFailed, 1)
- return nil, errwrap.Wrapf("failed to list using prefix: {{err}}", err)
+ return nil, fmt.Errorf("failed to list using prefix: %w", err)
}
for _, commonPrefix := range resp.Prefixes {
diff --git a/physical/oci/oci_ha.go b/physical/oci/oci_ha.go
index 9fe30128104b8..a4c6ad52ea6b8 100644
--- a/physical/oci/oci_ha.go
+++ b/physical/oci/oci_ha.go
@@ -14,7 +14,6 @@ import (
"time"
"github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/oracle/oci-go-sdk/objectstorage"
@@ -118,7 +117,7 @@ func (b *Backend) HAEnabled() bool {
func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
identity, err := uuid.GenerateUUID()
if err != nil {
- return nil, errwrap.Wrapf("Lock with: {{err}}", err)
+ return nil, fmt.Errorf("Lock with: %w", err)
}
return &Lock{
backend: b,
@@ -148,7 +147,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// occurs.
acquired, err := l.attemptLock(stopCh)
if err != nil {
- return nil, errwrap.Wrapf("lock: {{err}}", err)
+ return nil, fmt.Errorf("lock: %w", err)
}
if !acquired {
return nil, nil
@@ -183,7 +182,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
case <-ticker.C:
acquired, err := l.writeLock()
if err != nil {
- return false, errwrap.Wrapf("attempt lock: {{err}}", err)
+ return false, fmt.Errorf("attempt lock: %w", err)
}
if !acquired {
continue
@@ -314,7 +313,7 @@ func (l *Lock) Unlock() error {
// Get current lock record
currentLockRecord, etag, err := l.get(context.Background())
if err != nil {
- return errwrap.Wrapf("error reading lock record: {{err}}", err)
+ return fmt.Errorf("error reading lock record: %w", err)
}
if currentLockRecord != nil && currentLockRecord.Identity == l.identity {
@@ -323,7 +322,7 @@ func (l *Lock) Unlock() error {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Debug("Unlock: error generating UUID")
- return errwrap.Wrapf("failed to generate UUID: {{err}}", err)
+ return fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("Unlock", "opc-client-request-id", opcClientRequestId)
request := objectstorage.DeleteObjectRequest{
@@ -339,7 +338,7 @@ func (l *Lock) Unlock() error {
if err != nil {
metrics.IncrCounter(metricDeleteFailed, 1)
- return errwrap.Wrapf("write lock: {{err}}", err)
+ return fmt.Errorf("write lock: %w", err)
}
}
@@ -370,7 +369,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Error("getHa: error generating UUID")
- return nil, "", errwrap.Wrapf("failed to generate UUID: {{err}}", err)
+ return nil, "", fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("getHa", "opc-client-request-id", opcClientRequestId)
@@ -394,7 +393,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error calling GET", "err", err)
- return nil, "", errwrap.Wrapf(fmt.Sprintf("failed to read Value for %q: {{err}}", l.key), err)
+ return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
}
defer response.RawResponse.Body.Close()
@@ -403,7 +402,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error reading content", "err", err)
- return nil, "", errwrap.Wrapf("failed to decode Value into bytes: {{err}}", err)
+ return nil, "", fmt.Errorf("failed to decode Value into bytes: %w", err)
}
var lockRecord LockRecord
@@ -411,7 +410,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error un-marshalling content", "err", err)
- return nil, "", errwrap.Wrapf(fmt.Sprintf("failed to read Value for %q: {{err}}", l.key), err)
+ return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
}
return &lockRecord, *response.ETag, nil
@@ -442,7 +441,7 @@ func (l *Lock) writeLock() (bool, error) {
// case secondary
currentLockRecord, currentEtag, err := l.get(ctx)
if err != nil {
- return false, errwrap.Wrapf("error reading lock record: {{err}}", err)
+ return false, fmt.Errorf("error reading lock record: %w", err)
}
if (lockRecordCache == nil) || lockRecordCache.etag != currentEtag {
@@ -471,7 +470,7 @@ func (l *Lock) writeLock() (bool, error) {
newLockRecordJson, err := json.Marshal(newLockRecord)
if err != nil {
- return false, errwrap.Wrapf("error reading lock record: {{err}}", err)
+ return false, fmt.Errorf("error reading lock record: %w", err)
}
defer metrics.MeasureSince(metricPutHa, time.Now())
@@ -479,7 +478,7 @@ func (l *Lock) writeLock() (bool, error) {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Error("putHa: error generating UUID")
- return false, errwrap.Wrapf("failed to generate UUID", err)
+ return false, fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("putHa", "opc-client-request-id", opcClientRequestId)
size := int64(len(newLockRecordJson))
@@ -536,7 +535,7 @@ func (l *Lock) writeLock() (bool, error) {
}
if err != nil {
- return false, errwrap.Wrapf("write lock: {{err}}", err)
+ return false, fmt.Errorf("write lock: %w", err)
}
l.backend.logger.Debug("Lock written", string(newLockRecordJson))
diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go
index 669aba98daf6e..6766567341b35 100644
--- a/physical/postgresql/postgresql.go
+++ b/physical/postgresql/postgresql.go
@@ -10,7 +10,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/physical"
log "github.com/hashicorp/go-hclog"
@@ -108,7 +107,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
@@ -122,7 +121,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B
if maxIdleConnsIsSet {
maxIdleConns, err = strconv.Atoi(maxIdleConnsStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnsStr)
@@ -132,7 +131,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B
// Create PostgreSQL handle for the database.
db, err := sql.Open("postgres", connURL)
if err != nil {
- return nil, errwrap.Wrapf("failed to connect to postgres: {{err}}", err)
+ return nil, fmt.Errorf("failed to connect to postgres: %w", err)
}
db.SetMaxOpenConns(maxParInt)
@@ -144,7 +143,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B
var upsertAvailable bool
upsertAvailableQuery := "SELECT current_setting('server_version_num')::int >= 90500"
if err := db.QueryRow(upsertAvailableQuery).Scan(&upsertAvailable); err != nil {
- return nil, errwrap.Wrapf("failed to check for native upsert: {{err}}", err)
+ return nil, fmt.Errorf("failed to check for native upsert: %w", err)
}
if !upsertAvailable && conf["ha_enabled"] == "true" {
@@ -313,7 +312,7 @@ func (m *PostgreSQLBackend) List(ctx context.Context, prefix string) ([]string,
var key string
err = rows.Scan(&key)
if err != nil {
- return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err)
+ return nil, fmt.Errorf("failed to scan rows: %w", err)
}
keys = append(keys, key)
diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go
index 86cb08279e00d..934c5726c8d5b 100644
--- a/physical/raft/fsm.go
+++ b/physical/raft/fsm.go
@@ -15,7 +15,6 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/golang/protobuf/proto"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-raftchunking"
@@ -125,7 +124,7 @@ func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) {
dbPath := filepath.Join(path, databaseFilename)
if err := f.openDBFile(dbPath); err != nil {
- return nil, errwrap.Wrapf("failed to open bolt file: {{err}}", err)
+ return nil, fmt.Errorf("failed to open bolt file: %w", err)
}
return f, nil
@@ -792,7 +791,7 @@ func (f *FSM) Restore(r io.ReadCloser) error {
var retErr *multierror.Error
if err := snapshotInstaller.Install(dbPath); err != nil {
f.logger.Error("failed to install snapshot", "error", err)
- retErr = multierror.Append(retErr, errwrap.Wrapf("failed to install snapshot database: {{err}}", err))
+ retErr = multierror.Append(retErr, fmt.Errorf("failed to install snapshot database: %w", err))
} else {
f.logger.Info("snapshot installed")
}
@@ -801,7 +800,7 @@ func (f *FSM) Restore(r io.ReadCloser) error {
// worked. If the install failed we should try to open the old DB file.
if err := f.openDBFile(dbPath); err != nil {
f.logger.Error("failed to open new database file", "error", err)
- retErr = multierror.Append(retErr, errwrap.Wrapf("failed to open new bolt file: {{err}}", err))
+ retErr = multierror.Append(retErr, fmt.Errorf("failed to open new bolt file: %w", err))
}
// Handle local node config restore. lnConfig should not be nil here, but
@@ -810,7 +809,7 @@ func (f *FSM) Restore(r io.ReadCloser) error {
// Persist the local node config on the restored fsm.
if err := f.persistDesiredSuffrage(lnConfig); err != nil {
f.logger.Error("failed to persist local node config from before the restore", "error", err)
- retErr = multierror.Append(retErr, errwrap.Wrapf("failed to persist local node config from before the restore: {{err}}", err))
+ retErr = multierror.Append(retErr, fmt.Errorf("failed to persist local node config from before the restore: %w", err))
}
}
@@ -890,7 +889,7 @@ func (f *FSMChunkStorage) chunkPaths(chunk *raftchunking.ChunkInfo) (string, str
func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error) {
b, err := jsonutil.EncodeJSON(chunk)
if err != nil {
- return false, errwrap.Wrapf("error encoding chunk info: {{err}}", err)
+ return false, fmt.Errorf("error encoding chunk info: %w", err)
}
prefix, key := f.chunkPaths(chunk)
@@ -907,7 +906,7 @@ func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error
done := new(bool)
if err := f.f.db.Update(func(tx *bolt.Tx) error {
if err := tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value); err != nil {
- return errwrap.Wrapf("error storing chunk info: {{err}}", err)
+ return fmt.Errorf("error storing chunk info: %w", err)
}
// Assume bucket exists and has keys
@@ -940,12 +939,12 @@ func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error
func (f *FSMChunkStorage) FinalizeOp(opNum uint64) ([]*raftchunking.ChunkInfo, error) {
ret, err := f.chunksForOpNum(opNum)
if err != nil {
- return nil, errwrap.Wrapf("error getting chunks for op keys: {{err}}", err)
+ return nil, fmt.Errorf("error getting chunks for op keys: %w", err)
}
prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum})
if err := f.f.DeletePrefix(f.ctx, prefix); err != nil {
- return nil, errwrap.Wrapf("error deleting prefix after op finalization: {{err}}", err)
+ return nil, fmt.Errorf("error deleting prefix after op finalization: %w", err)
}
return ret, nil
@@ -956,7 +955,7 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf
opChunkKeys, err := f.f.List(f.ctx, prefix)
if err != nil {
- return nil, errwrap.Wrapf("error fetching op chunk keys: {{err}}", err)
+ return nil, fmt.Errorf("error fetching op chunk keys: %w", err)
}
if len(opChunkKeys) == 0 {
@@ -968,17 +967,17 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf
for _, v := range opChunkKeys {
seqNum, err := strconv.ParseInt(v, 10, 64)
if err != nil {
- return nil, errwrap.Wrapf("error converting seqnum to integer: {{err}}", err)
+ return nil, fmt.Errorf("error converting seqnum to integer: %w", err)
}
entry, err := f.f.Get(f.ctx, prefix+v)
if err != nil {
- return nil, errwrap.Wrapf("error fetching chunkinfo: {{err}}", err)
+ return nil, fmt.Errorf("error fetching chunkinfo: %w", err)
}
var ci raftchunking.ChunkInfo
if err := jsonutil.DecodeJSON(entry.Value, &ci); err != nil {
- return nil, errwrap.Wrapf("error decoding chunkinfo json: {{err}}", err)
+ return nil, fmt.Errorf("error decoding chunkinfo json: %w", err)
}
if ret == nil {
@@ -994,7 +993,7 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf
func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) {
opNums, err := f.f.List(f.ctx, chunkingPrefix)
if err != nil {
- return nil, errwrap.Wrapf("error doing recursive list for chunk saving: {{err}}", err)
+ return nil, fmt.Errorf("error doing recursive list for chunk saving: %w", err)
}
if len(opNums) == 0 {
@@ -1005,12 +1004,12 @@ func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) {
for _, opNumStr := range opNums {
opNum, err := strconv.ParseInt(opNumStr, 10, 64)
if err != nil {
- return nil, errwrap.Wrapf("error parsing op num during chunk saving: {{err}}", err)
+ return nil, fmt.Errorf("error parsing op num during chunk saving: %w", err)
}
opChunks, err := f.chunksForOpNum(uint64(opNum))
if err != nil {
- return nil, errwrap.Wrapf("error getting chunks for op keys during chunk saving: {{err}}", err)
+ return nil, fmt.Errorf("error getting chunks for op keys during chunk saving: %w", err)
}
ret[uint64(opNum)] = opChunks
@@ -1021,7 +1020,7 @@ func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) {
func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error {
if err := f.f.DeletePrefix(f.ctx, chunkingPrefix); err != nil {
- return errwrap.Wrapf("error deleting prefix for chunk restoration: {{err}}", err)
+ return fmt.Errorf("error deleting prefix for chunk restoration: %w", err)
}
if len(chunks) == 0 {
return nil
@@ -1036,7 +1035,7 @@ func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error {
return errors.New("unexpected op number in chunk")
}
if _, err := f.StoreChunk(chunk); err != nil {
- return errwrap.Wrapf("error storing chunk during restoration: {{err}}", err)
+ return fmt.Errorf("error storing chunk during restoration: %w", err)
}
}
}
diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index 2a458b4d830d8..bf05c3b6a39ce 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -15,7 +15,6 @@ import (
"github.com/armon/go-metrics"
"github.com/golang/protobuf/proto"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/go-raftchunking"
@@ -219,7 +218,7 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) {
var leaderInfos []*LeaderJoinInfo
err := jsonutil.DecodeJSON([]byte(config), &leaderInfos)
if err != nil {
- return nil, errwrap.Wrapf("failed to decode retry_join config: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode retry_join config: %w", err)
}
if len(leaderInfos) == 0 {
@@ -238,7 +237,7 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) {
info.Retry = true
info.TLSConfig, err = parseTLSInfo(info)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to create tls config to communicate with leader node (retry_join index: %d): {{err}}", i), err)
+ return nil, fmt.Errorf("failed to create tls config to communicate with leader node (retry_join index: %d): %w", i, err)
}
}
@@ -804,7 +803,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
recoveryConfig, err := raft.ReadConfigJSON(peersFile)
if err != nil {
- return errwrap.Wrapf("raft recovery failed to parse peers.json: {{err}}", err)
+ return fmt.Errorf("raft recovery failed to parse peers.json: %w", err)
}
// Non-voting servers are only allowed in enterprise. If Suffrage is disabled,
@@ -819,12 +818,12 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, recoveryConfig)
if err != nil {
- return errwrap.Wrapf("raft recovery failed: {{err}}", err)
+ return fmt.Errorf("raft recovery failed: %w", err)
}
err = os.Remove(peersFile)
if err != nil {
- return errwrap.Wrapf("raft recovery failed to delete peers.json; please delete manually: {{err}}", err)
+ return fmt.Errorf("raft recovery failed to delete peers.json; please delete manually: %w", err)
}
b.logger.Info("raft recovery deleted peers.json")
}
@@ -832,7 +831,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
if opts.RecoveryModeConfig != nil {
err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *opts.RecoveryModeConfig)
if err != nil {
- return errwrap.Wrapf("recovering raft cluster failed: {{err}}", err)
+ return fmt.Errorf("recovering raft cluster failed: %w", err)
}
}
@@ -857,7 +856,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
case <-ctx.Done():
future := raftObj.Shutdown()
if future.Error() != nil {
- return errwrap.Wrapf("shutdown while waiting for leadership: {{err}}", future.Error())
+ return fmt.Errorf("shutdown while waiting for leadership: %w", future.Error())
}
return errors.New("shutdown while waiting for leadership")
diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go
index 349606b527f47..ed154f8bcdaf4 100644
--- a/physical/raft/streamlayer.go
+++ b/physical/raft/streamlayer.go
@@ -19,7 +19,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/raft"
@@ -119,7 +118,7 @@ func GenerateTLSKey(reader io.Reader) (*TLSKey, error) {
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
if err != nil {
- return nil, errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err)
+ return nil, fmt.Errorf("unable to generate local cluster certificate: %w", err)
}
return &TLSKey{
@@ -226,7 +225,7 @@ func (l *raftLayer) setTLSKeyring(keyring *TLSKeyring) error {
parsedCert, err := x509.ParseCertificate(key.CertBytes)
if err != nil {
- return errwrap.Wrapf("error parsing raft cluster certificate: {{err}}", err)
+ return fmt.Errorf("error parsing raft cluster certificate: %w", err)
}
key.parsedCert = parsedCert
diff --git a/physical/s3/s3.go b/physical/s3/s3.go
index 7c4822a3a3e84..2329580145a0a 100644
--- a/physical/s3/s3.go
+++ b/physical/s3/s3.go
@@ -18,7 +18,6 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/awsutil"
@@ -129,7 +128,7 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend,
_, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("unable to access bucket %q in region %q: {{err}}", bucket, region), err)
+ return nil, fmt.Errorf("unable to access bucket %q in region %q: %w", bucket, region, err)
}
maxParStr, ok := conf["max_parallel"]
@@ -137,7 +136,7 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend,
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go
index 8e4e13265fab8..1202f9c9f1514 100644
--- a/physical/spanner/spanner.go
+++ b/physical/spanner/spanner.go
@@ -10,7 +10,6 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/helper/useragent"
@@ -147,7 +146,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
var err error
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
- return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
}
}
if haEnabled {
@@ -158,14 +157,14 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
option.WithUserAgent(useragent.String()),
)
if err != nil {
- return nil, errwrap.Wrapf("failed to create HA client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create HA client: %w", err)
}
}
// Max parallel
maxParallel, err := extractInt(c["max_parallel"])
if err != nil {
- return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse max_parallel: %w", err)
}
logger.Debug("configuration",
@@ -182,7 +181,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
option.WithUserAgent(useragent.String()),
)
if err != nil {
- return nil, errwrap.Wrapf("failed to create spanner client: {{err}}", err)
+ return nil, fmt.Errorf("failed to create spanner client: %w", err)
}
return &Backend{
@@ -213,7 +212,7 @@ func (b *Backend) Put(ctx context.Context, entry *physical.Entry) error {
"Value": entry.Value,
})
if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil {
- return errwrap.Wrapf("failed to put data: {{err}}", err)
+ return fmt.Errorf("failed to put data: %w", err)
}
return nil
}
@@ -232,12 +231,12 @@ func (b *Backend) Get(ctx context.Context, key string) (*physical.Entry, error)
return nil, nil
}
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", key), err)
+ return nil, fmt.Errorf("failed to read value for %q: %w", key, err)
}
var value []byte
if err := row.Column(0, &value); err != nil {
- return nil, errwrap.Wrapf("failed to decode value into bytes: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode value into bytes: %w", err)
}
return &physical.Entry{
@@ -257,7 +256,7 @@ func (b *Backend) Delete(ctx context.Context, key string) error {
// Delete
m := spanner.Delete(b.table, spanner.Key{key})
if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil {
- return errwrap.Wrapf("failed to delete key: {{err}}", err)
+ return fmt.Errorf("failed to delete key: %w", err)
}
return nil
@@ -291,12 +290,12 @@ func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) {
break
}
if err != nil {
- return nil, errwrap.Wrapf("failed to read row: {{err}}", err)
+ return nil, fmt.Errorf("failed to read row: %w", err)
}
var key string
if err := row.Column(0, &key); err != nil {
- return nil, errwrap.Wrapf("failed to decode key into string: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode key into string: %w", err)
}
// The results will include the full prefix (folder) and any deeply-nested
@@ -351,7 +350,7 @@ func (b *Backend) Transaction(ctx context.Context, txns []*physical.TxnEntry) er
// Transactivate!
if _, err := b.client.Apply(ctx, ms); err != nil {
- return errwrap.Wrapf("failed to commit transaction: {{err}}", err)
+ return fmt.Errorf("failed to commit transaction: %w", err)
}
return nil
diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go
index ab9c9a855ca8b..f3284fc270196 100644
--- a/physical/spanner/spanner_ha.go
+++ b/physical/spanner/spanner_ha.go
@@ -8,7 +8,6 @@ import (
"cloud.google.com/go/spanner"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/pkg/errors"
@@ -104,7 +103,7 @@ func (b *Backend) HAEnabled() bool {
func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
identity, err := uuid.GenerateUUID()
if err != nil {
- return nil, errwrap.Wrapf("lock with: {{err}}", err)
+ return nil, fmt.Errorf("lock with: %w", err)
}
return &Lock{
backend: b,
@@ -137,7 +136,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// occurs.
acquired, err := l.attemptLock(stopCh)
if err != nil {
- return nil, errwrap.Wrapf("lock: {{err}}", err)
+ return nil, fmt.Errorf("lock: %w", err)
}
if !acquired {
return nil, nil
@@ -190,7 +189,7 @@ func (l *Lock) Unlock() error {
var r LockRecord
if derr := row.ToStruct(&r); derr != nil {
- return errwrap.Wrapf("failed to decode to struct: {{err}}", derr)
+ return fmt.Errorf("failed to decode to struct: %w", derr)
}
// If the identity is different, that means that between the time that after
@@ -204,7 +203,7 @@ func (l *Lock) Unlock() error {
spanner.Delete(l.backend.haTable, spanner.Key{l.key}),
})
}); err != nil {
- return errwrap.Wrapf("unlock: {{err}}", err)
+ return fmt.Errorf("unlock: %w", err)
}
// We are no longer holding the lock
@@ -239,7 +238,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
case <-ticker.C:
acquired, err := l.writeLock()
if err != nil {
- return false, errwrap.Wrapf("attempt lock: {{err}}", err)
+ return false, fmt.Errorf("attempt lock: %w", err)
}
if !acquired {
continue
@@ -353,7 +352,7 @@ func (l *Lock) writeLock() (bool, error) {
if row != nil {
var r LockRecord
if derr := row.ToStruct(&r); derr != nil {
- return errwrap.Wrapf("failed to decode to struct: {{err}}", derr)
+ return fmt.Errorf("failed to decode to struct: %w", derr)
}
// If the key is empty or the identity is ours or the ttl expired, we can
@@ -370,10 +369,10 @@ func (l *Lock) writeLock() (bool, error) {
Timestamp: time.Now().UTC(),
})
if err != nil {
- return errwrap.Wrapf("failed to generate struct: {{err}}", err)
+ return fmt.Errorf("failed to generate struct: %w", err)
}
if err := txn.BufferWrite([]*spanner.Mutation{m}); err != nil {
- return errwrap.Wrapf("failed to write: {{err}}", err)
+ return fmt.Errorf("failed to write: %w", err)
}
// Mark that the lock was acquired
@@ -382,7 +381,7 @@ func (l *Lock) writeLock() (bool, error) {
return nil
})
if err != nil {
- return false, errwrap.Wrapf("write lock: {{err}}", err)
+ return false, fmt.Errorf("write lock: %w", err)
}
return lockWritten, nil
@@ -396,12 +395,12 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) {
return nil, nil
}
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", l.key), err)
+ return nil, fmt.Errorf("failed to read value for %q: %w", l.key, err)
}
var r LockRecord
if err := row.ToStruct(&r); err != nil {
- return nil, errwrap.Wrapf("failed to decode lock: {{err}}", err)
+ return nil, fmt.Errorf("failed to decode lock: %w", err)
}
return &r, nil
}
diff --git a/physical/swift/swift.go b/physical/swift/swift.go
index 260a5bedc5f0c..20de749b19169 100644
--- a/physical/swift/swift.go
+++ b/physical/swift/swift.go
@@ -12,7 +12,6 @@ import (
log "github.com/hashicorp/go-hclog"
metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/physical"
@@ -128,7 +127,7 @@ func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backen
_, _, err = c.Container(container)
if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("Unable to access container %q: {{err}}", container), err)
+ return nil, fmt.Errorf("Unable to access container %q: %w", container, err)
}
maxParStr, ok := conf["max_parallel"]
@@ -136,7 +135,7 @@ func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backen
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go
index 47a0fb3eb7357..870999220ce19 100644
--- a/physical/zookeeper/zookeeper.go
+++ b/physical/zookeeper/zookeeper.go
@@ -13,7 +13,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/physical"
@@ -129,14 +128,14 @@ func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Ba
// We have all of the configuration in hand - let's try and connect to ZK
client, _, err := createClient(conf, machines, time.Second)
if err != nil {
- return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
+ return nil, fmt.Errorf("client setup failed: %w", err)
}
// ZK AddAuth API if the user asked for it
if useAddAuth {
err = client.AddAuth(schema, []byte(owner))
if err != nil {
- return nil, errwrap.Wrapf("ZooKeeper rejected authentication information provided at auth_info: {{err}}", err)
+ return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %w", err)
}
}
@@ -163,7 +162,7 @@ func createClient(conf map[string]string, machines string, timeout time.Duration
if ok && isTlsEnabledStr != "" {
parsedBoolval, err := parseutil.ParseBool(isTlsEnabledStr)
if err != nil {
- return nil, nil, errwrap.Wrapf("failed parsing tls_enabled parameter: {{err}}", err)
+ return nil, nil, fmt.Errorf("failed parsing tls_enabled parameter: %w", err)
}
isTlsEnabled = parsedBoolval
}
@@ -194,7 +193,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer {
if strings.Contains(sParseErr.Error(), "missing port") {
serverName = addr
} else {
- return nil, errwrap.Wrapf("failed parsing the server address for 'serverName' setting {{err}}", sParseErr)
+ return nil, fmt.Errorf("failed parsing the server address for 'serverName' setting %w", sParseErr)
}
}
@@ -204,7 +203,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer {
if ok && tlsSkipVerify != "" {
b, err := parseutil.ParseBool(tlsSkipVerify)
if err != nil {
- return nil, errwrap.Wrapf("failed parsing tls_skip_verify parameter: {{err}}", err)
+ return nil, fmt.Errorf("failed parsing tls_skip_verify parameter: %w", err)
}
insecureSkipVerify = b
}
@@ -220,7 +219,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer {
if lookupOk && configVal != "" {
parsedIpSanCheck, ipSanErr := parseutil.ParseBool(configVal)
if ipSanErr != nil {
- return nil, errwrap.Wrapf("failed parsing tls_verify_ip parameter: {{err}}", ipSanErr)
+ return nil, fmt.Errorf("failed parsing tls_verify_ip parameter: %w", ipSanErr)
}
ipSanCheck = parsedIpSanCheck
}
@@ -270,7 +269,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer {
if okCert && okKey {
tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
if err != nil {
- return nil, errwrap.Wrapf("client tls setup failed for ZK: {{err}}", err)
+ return nil, fmt.Errorf("client tls setup failed for ZK: %w", err)
}
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
@@ -281,7 +280,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer {
data, err := ioutil.ReadFile(tlsCaFile)
if err != nil {
- return nil, errwrap.Wrapf("failed to read ZK CA file: {{err}}", err)
+ return nil, fmt.Errorf("failed to read ZK CA file: %w", err)
}
if !caPool.AppendCertsFromPEM(data) {
@@ -346,7 +345,7 @@ func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error {
_, stat, err := c.client.Exists(fullPath)
if err != nil {
- return errwrap.Wrapf("failed to acquire node data: {{err}}", err)
+ return fmt.Errorf("failed to acquire node data: %w", err)
}
if stat.DataLength > 0 && stat.NumChildren > 0 {
@@ -358,7 +357,7 @@ func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error {
} else {
// Empty node, lets clean it up!
if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode {
- return errwrap.Wrapf(fmt.Sprintf("removal of node %q failed: {{err}}", fullPath), err)
+ return fmt.Errorf("removal of node %q failed: %w", fullPath, err)
}
}
}
@@ -426,7 +425,7 @@ func (c *ZooKeeperBackend) Delete(ctx context.Context, key string) error {
// Mask if the node does not exist
if err != nil && err != zk.ErrNoNode {
- return errwrap.Wrapf(fmt.Sprintf("failed to remove %q: {{err}}", fullPath), err)
+ return fmt.Errorf("failed to remove %q: %w", fullPath, err)
}
err = c.cleanupLogicalPath(key)
@@ -545,7 +544,7 @@ func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
// Watch for Events which could result in loss of our zkLock and close(i.leaderCh)
currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath)
if err != nil {
- return nil, errwrap.Wrapf("unable to watch HA lock: {{err}}", err)
+ return nil, fmt.Errorf("unable to watch HA lock: %w", err)
}
if i.value != string(currentVal) {
return nil, fmt.Errorf("lost HA lock immediately before watch")
diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go
index b1d3ea5598387..ee22964807c1f 100644
--- a/plugins/database/influxdb/connection_producer.go
+++ b/plugins/database/influxdb/connection_producer.go
@@ -7,7 +7,6 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/helper/certutil"
@@ -62,7 +61,7 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi
}
i.connectTimeout, err = parseutil.ParseDurationSecond(i.ConnectTimeoutRaw)
if err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("invalid connect_timeout: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("invalid connect_timeout: %w", err)
}
switch {
@@ -80,11 +79,11 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi
case len(i.PemJSON) != 0:
parsedCertBundle, err = certutil.ParsePKIJSON([]byte(i.PemJSON))
if err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %w", err)
}
certBundle, err = parsedCertBundle.ToCertBundle()
if err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error marshaling PEM information: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("Error marshaling PEM information: %w", err)
}
i.certificate = certBundle.Certificate
i.privateKey = certBundle.PrivateKey
@@ -94,11 +93,11 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi
case len(i.PemBundle) != 0:
parsedCertBundle, err = certutil.ParsePEMBundle(i.PemBundle)
if err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error parsing the given PEM information: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("Error parsing the given PEM information: %w", err)
}
certBundle, err = parsedCertBundle.ToCertBundle()
if err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error marshaling PEM information: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("Error marshaling PEM information: %w", err)
}
i.certificate = certBundle.Certificate
i.privateKey = certBundle.PrivateKey
@@ -112,7 +111,7 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi
if req.VerifyConnection {
if _, err := i.Connection(ctx); err != nil {
- return dbplugin.InitializeResponse{}, errwrap.Wrapf("error verifying connection: {{err}}", err)
+ return dbplugin.InitializeResponse{}, fmt.Errorf("error verifying connection: %w", err)
}
}
@@ -185,12 +184,12 @@ func (i *influxdbConnectionProducer) createClient() (influx.Client, error) {
parsedCertBundle, err := certBundle.ToParsedCertBundle()
if err != nil {
- return nil, errwrap.Wrapf("failed to parse certificate bundle: {{err}}", err)
+ return nil, fmt.Errorf("failed to parse certificate bundle: %w", err)
}
tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
if err != nil || tlsConfig == nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to get TLS configuration: tlsConfig:%#v err:{{err}}", tlsConfig), err)
+ return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%w", tlsConfig, err)
}
}
@@ -214,19 +213,19 @@ func (i *influxdbConnectionProducer) createClient() (influx.Client, error) {
cli, err := influx.NewHTTPClient(clientConfig)
if err != nil {
- return nil, errwrap.Wrapf("error creating client: {{err}}", err)
+ return nil, fmt.Errorf("error creating client: %w", err)
}
// Checking server status
_, _, err = cli.Ping(i.connectTimeout)
if err != nil {
- return nil, errwrap.Wrapf("error checking cluster status: {{err}}", err)
+ return nil, fmt.Errorf("error checking cluster status: %w", err)
}
// verifying infos about the connection
isAdmin, err := isUserAdmin(cli, i.Username)
if err != nil {
- return nil, errwrap.Wrapf("error getting if provided username is admin: {{err}}", err)
+ return nil, fmt.Errorf("error getting if provided username is admin: %w", err)
}
if !isAdmin {
return nil, fmt.Errorf("the provided user is not an admin of the influxDB server")
diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go
index 8669961741274..5328996927d40 100644
--- a/plugins/database/influxdb/influxdb_test.go
+++ b/plugins/database/influxdb/influxdb_test.go
@@ -11,7 +11,6 @@ import (
"testing"
"time"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/testhelpers/docker"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
@@ -78,12 +77,12 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) {
})
cli, err := influx.NewHTTPClient(c.apiConfig())
if err != nil {
- return nil, errwrap.Wrapf("error creating InfluxDB client: {{err}}", err)
+ return nil, fmt.Errorf("error creating InfluxDB client: %w", err)
}
defer cli.Close()
_, _, err = cli.Ping(1)
if err != nil {
- return nil, errwrap.Wrapf("error checking cluster status: {{err}}", err)
+ return nil, fmt.Errorf("error checking cluster status: %w", err)
}
return c, nil
@@ -421,20 +420,20 @@ func testCredsExist(address, username, password string) error {
}
cli, err := influx.NewHTTPClient(conf)
if err != nil {
- return errwrap.Wrapf("Error creating InfluxDB Client: ", err)
+ return fmt.Errorf("Error creating InfluxDB Client: %w", err)
}
defer cli.Close()
_, _, err = cli.Ping(1)
if err != nil {
- return errwrap.Wrapf("error checking server ping: {{err}}", err)
+ return fmt.Errorf("error checking server ping: %w", err)
}
q := influx.NewQuery("SHOW SERIES ON vault", "", "")
response, err := cli.Query(q)
if err != nil {
- return errwrap.Wrapf("error querying influxdb server: {{err}}", err)
+ return fmt.Errorf("error querying influxdb server: %w", err)
}
if response != nil && response.Error() != nil {
- return errwrap.Wrapf("error using the correct influx database: {{err}}", response.Error())
+ return fmt.Errorf("error using the correct influx database: %w", response.Error())
}
return nil
}
diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go
index f160c0a043dd8..348fb6bd4d435 100644
--- a/plugins/database/mongodb/connection_producer.go
+++ b/plugins/database/mongodb/connection_producer.go
@@ -10,9 +10,9 @@ import (
"sync"
"time"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+ "github.com/mitchellh/mapstructure"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
@@ -31,6 +31,10 @@ type mongoDBConnectionProducer struct {
TLSCertificateKeyData []byte `json:"tls_certificate_key" structs:"-" mapstructure:"tls_certificate_key"`
TLSCAData []byte `json:"tls_ca" structs:"-" mapstructure:"tls_ca"`
+ SocketTimeout time.Duration `json:"socket_timeout" structs:"-" mapstructure:"socket_timeout"`
+ ConnectTimeout time.Duration `json:"connect_timeout" structs:"-" mapstructure:"connect_timeout"`
+ ServerSelectionTimeout time.Duration `json:"server_selection_timeout" structs:"-" mapstructure:"server_selection_timeout"`
+
Initialized bool
RawConfig map[string]interface{}
Type string
@@ -48,15 +52,47 @@ type writeConcern struct {
J bool // Sync via the journal if present
}
+func (c *mongoDBConnectionProducer) loadConfig(cfg map[string]interface{}) error {
+ err := mapstructure.WeakDecode(cfg, c)
+ if err != nil {
+ return err
+ }
+
+ if len(c.ConnectionURL) == 0 {
+ return fmt.Errorf("connection_url cannot be empty")
+ }
+
+ if c.SocketTimeout < 0 {
+ return fmt.Errorf("socket_timeout must be >= 0")
+ }
+ if c.ConnectTimeout < 0 {
+ return fmt.Errorf("connect_timeout must be >= 0")
+ }
+ if c.ServerSelectionTimeout < 0 {
+ return fmt.Errorf("server_selection_timeout must be >= 0")
+ }
+
+ opts, err := c.makeClientOpts()
+ if err != nil {
+ return err
+ }
+
+ c.clientOptions = opts
+
+ return nil
+}
+
// Connection creates or returns an existing a database connection. If the session fails
// on a ping check, the session will be closed and then re-created.
-// This method does not lock the mutex and it is intended that this is the callers
-// responsibility.
-func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{}, error) {
+// This method locks the mutex on its own.
+func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (*mongo.Client, error) {
if !c.Initialized {
return nil, connutil.ErrNotInitialized
}
+ c.Mutex.Lock()
+ defer c.Mutex.Unlock()
+
if c.client != nil {
if err := c.client.Ping(ctx, readpref.Primary()); err == nil {
return c.client, nil
@@ -65,8 +101,7 @@ func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{}
_ = c.client.Disconnect(ctx)
}
- connURL := c.getConnectionURL()
- client, err := createClient(ctx, connURL, c.clientOptions)
+ client, err := c.createClient(ctx)
if err != nil {
return nil, err
}
@@ -74,14 +109,14 @@ func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{}
return c.client, nil
}
-func createClient(ctx context.Context, connURL string, clientOptions *options.ClientOptions) (client *mongo.Client, err error) {
- if clientOptions == nil {
- clientOptions = options.Client()
+func (c *mongoDBConnectionProducer) createClient(ctx context.Context) (client *mongo.Client, err error) {
+ if !c.Initialized {
+ return nil, fmt.Errorf("failed to create client: connection producer is not initialized")
}
- clientOptions.SetSocketTimeout(1 * time.Minute)
- clientOptions.SetConnectTimeout(1 * time.Minute)
-
- client, err = mongo.Connect(ctx, options.MergeClientOptions(options.Client().ApplyURI(connURL), clientOptions))
+ if c.clientOptions == nil {
+ return nil, fmt.Errorf("missing client options")
+ }
+ client, err = mongo.Connect(ctx, options.MergeClientOptions(options.Client().ApplyURI(c.getConnectionURL()), c.clientOptions))
if err != nil {
return nil, err
}
@@ -120,6 +155,26 @@ func (c *mongoDBConnectionProducer) getConnectionURL() (connURL string) {
return connURL
}
+func (c *mongoDBConnectionProducer) makeClientOpts() (*options.ClientOptions, error) {
+ writeOpts, err := c.getWriteConcern()
+ if err != nil {
+ return nil, err
+ }
+
+ authOpts, err := c.getTLSAuth()
+ if err != nil {
+ return nil, err
+ }
+
+ timeoutOpts, err := c.timeoutOpts()
+ if err != nil {
+ return nil, err
+ }
+
+ opts := options.MergeClientOptions(writeOpts, authOpts, timeoutOpts)
+ return opts, nil
+}
+
func (c *mongoDBConnectionProducer) getWriteConcern() (opts *options.ClientOptions, err error) {
if c.WriteConcern == "" {
return nil, nil
@@ -137,7 +192,7 @@ func (c *mongoDBConnectionProducer) getWriteConcern() (opts *options.ClientOptio
concern := &writeConcern{}
err = json.Unmarshal([]byte(input), concern)
if err != nil {
- return nil, errwrap.Wrapf("error unmarshalling write_concern: {{err}}", err)
+ return nil, fmt.Errorf("error unmarshalling write_concern: %w", err)
}
// Translate write concern to mongo options
@@ -206,3 +261,29 @@ func (c *mongoDBConnectionProducer) getTLSAuth() (opts *options.ClientOptions, e
opts.SetTLSConfig(tlsConfig)
return opts, nil
}
+
+func (c *mongoDBConnectionProducer) timeoutOpts() (opts *options.ClientOptions, err error) {
+ opts = options.Client()
+
+ if c.SocketTimeout < 0 {
+ return nil, fmt.Errorf("socket_timeout must be >= 0")
+ }
+
+ if c.SocketTimeout == 0 {
+ opts.SetSocketTimeout(1 * time.Minute)
+ } else {
+ opts.SetSocketTimeout(c.SocketTimeout)
+ }
+
+ if c.ConnectTimeout == 0 {
+ opts.SetConnectTimeout(1 * time.Minute)
+ } else {
+ opts.SetConnectTimeout(c.ConnectTimeout)
+ }
+
+ if c.ServerSelectionTimeout != 0 {
+ opts.SetServerSelectionTimeout(c.ServerSelectionTimeout)
+ }
+
+ return opts, nil
+}
diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go
index c39914cc537ff..4b0ccaf2514a7 100644
--- a/plugins/database/mongodb/connection_producer_test.go
+++ b/plugins/database/mongodb/connection_producer_test.go
@@ -103,7 +103,7 @@ net:
"connectionStatus": 1,
}
- client, err := mongo.getConnection(ctx)
+ client, err := mongo.Connection(ctx)
if err != nil {
t.Fatalf("Unable to make connection to Mongo: %s", err)
}
diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go
index bfd8d4a3ca136..884f17dbe23af 100644
--- a/plugins/database/mongodb/mongodb.go
+++ b/plugins/database/mongodb/mongodb.go
@@ -7,14 +7,12 @@ import (
"io"
"strings"
+ log "github.com/hashicorp/go-hclog"
+ dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/helper/template"
-
- dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
- "github.com/mitchellh/mapstructure"
"go.mongodb.org/mongo-driver/mongo"
- "go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"go.mongodb.org/mongo-driver/mongo/writeconcern"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
@@ -57,15 +55,6 @@ func (m *MongoDB) Type() (string, error) {
return mongoDBTypeName, nil
}
-func (m *MongoDB) getConnection(ctx context.Context) (*mongo.Client, error) {
- client, err := m.Connection(ctx)
- if err != nil {
- return nil, err
- }
-
- return client.(*mongo.Client), nil
-}
-
func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) {
m.Lock()
defer m.Unlock()
@@ -91,41 +80,27 @@ func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest
return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err)
}
- err = mapstructure.WeakDecode(req.Config, m.mongoDBConnectionProducer)
- if err != nil {
- return dbplugin.InitializeResponse{}, err
- }
-
- if len(m.ConnectionURL) == 0 {
- return dbplugin.InitializeResponse{}, fmt.Errorf("connection_url cannot be empty-mongo fail")
- }
-
- writeOpts, err := m.getWriteConcern()
- if err != nil {
- return dbplugin.InitializeResponse{}, err
- }
-
- authOpts, err := m.getTLSAuth()
+ err = m.mongoDBConnectionProducer.loadConfig(req.Config)
if err != nil {
return dbplugin.InitializeResponse{}, err
}
- m.clientOptions = options.MergeClientOptions(writeOpts, authOpts)
-
// Set initialized to true at this point since all fields are set,
// and the connection can be established at a later time.
m.Initialized = true
if req.VerifyConnection {
- _, err := m.Connection(ctx)
+ client, err := m.mongoDBConnectionProducer.createClient(ctx)
if err != nil {
return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err)
}
- err = m.client.Ping(ctx, readpref.Primary())
+ err = client.Ping(ctx, readpref.Primary())
if err != nil {
+ _ = client.Disconnect(ctx) // Try to prevent any sort of resource leak
return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err)
}
+ m.mongoDBConnectionProducer.client = client
}
resp := dbplugin.InitializeResponse{
@@ -135,10 +110,6 @@ func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest
}
func (m *MongoDB) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) {
- // Grab the lock
- m.Lock()
- defer m.Unlock()
-
if len(req.Statements.Commands) == 0 {
return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement
}
@@ -189,9 +160,6 @@ func (m *MongoDB) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest
}
func (m *MongoDB) changeUserPassword(ctx context.Context, username, password string) error {
- m.Lock()
- defer m.Unlock()
-
connURL := m.getConnectionURL()
cs, err := connstring.Parse(connURL)
if err != nil {
@@ -218,9 +186,6 @@ func (m *MongoDB) changeUserPassword(ctx context.Context, username, password str
}
func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) {
- m.Lock()
- defer m.Unlock()
-
// If no revocation statements provided, pass in empty JSON
var revocationStatement string
switch len(req.Statements.Commands) {
@@ -251,6 +216,12 @@ func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest
}
err = m.runCommandWithRetry(ctx, db, dropUserCmd)
+ cErr, ok := err.(mongo.CommandError)
+ if ok && cErr.Name == "UserNotFound" { // User already removed, don't retry needlessly
+ log.Default().Warn("MongoDB user was deleted prior to lease revocation", "user", req.Username)
+ return dbplugin.DeleteUserResponse{}, nil
+ }
+
return dbplugin.DeleteUserResponse{}, err
}
@@ -258,7 +229,7 @@ func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest
// on the first attempt. This should be called with the lock held
func (m *MongoDB) runCommandWithRetry(ctx context.Context, db string, cmd interface{}) error {
// Get the client
- client, err := m.getConnection(ctx)
+ client, err := m.Connection(ctx)
if err != nil {
return err
}
@@ -273,7 +244,7 @@ func (m *MongoDB) runCommandWithRetry(ctx context.Context, db string, cmd interf
return nil
case err == io.EOF, strings.Contains(err.Error(), "EOF"):
// Call getConnection to reset and retry query if we get an EOF error on first attempt.
- client, err = m.getConnection(ctx)
+ client, err = m.Connection(ctx)
if err != nil {
return err
}
diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go
index cb088892487de..832b0ce1f7d16 100644
--- a/plugins/database/mongodb/mongodb_test.go
+++ b/plugins/database/mongodb/mongodb_test.go
@@ -7,9 +7,13 @@ import (
"fmt"
"reflect"
"strings"
+ "sync"
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
@@ -346,9 +350,7 @@ func TestGetTLSAuth(t *testing.T) {
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
- if !reflect.DeepEqual(actual, test.expectOpts) {
- t.Fatalf("Actual:\n%#v\nExpected:\n%#v", actual, test.expectOpts)
- }
+ assertDeepEqual(t, test.expectOpts, actual)
})
}
}
@@ -363,6 +365,27 @@ func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.Cer
return pool
}
+var cmpClientOptionsOpts = cmp.Options{
+ cmp.AllowUnexported(options.ClientOptions{}),
+
+ cmp.AllowUnexported(tls.Config{}),
+ cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
+
+ // 'lazyCerts' has a func field which can't be compared.
+ cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
+ cmp.AllowUnexported(x509.CertPool{}),
+}
+
+// Need a special comparison for ClientOptions because reflect.DeepEqual won't work in Go 1.16.
+// See: https://github.com/golang/go/issues/45891
+func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
+ t.Helper()
+
+ if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
+ t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
+ }
+}
+
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go
index bfed5fee1fd90..feb4385b0a9ca 100644
--- a/plugins/database/mssql/mssql.go
+++ b/plugins/database/mssql/mssql.go
@@ -8,7 +8,6 @@ import (
"strings"
_ "github.com/denisenkom/go-mssqldb"
- "github.com/hashicorp/errwrap"
multierror "github.com/hashicorp/go-multierror"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
@@ -279,10 +278,10 @@ func (m *MSSQL) revokeUserDefault(ctx context.Context, username string) error {
// can't drop if not all database users are dropped
if rows.Err() != nil {
- return errwrap.Wrapf("could not generate sql statements for all rows: {{err}}", rows.Err())
+ return fmt.Errorf("could not generate sql statements for all rows: %w", rows.Err())
}
if lastStmtError != nil {
- return errwrap.Wrapf("could not perform all sql statements: {{err}}", lastStmtError)
+ return fmt.Errorf("could not perform all sql statements: %w", lastStmtError)
}
// Drop this login
diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go
index 014eeea3ffa5b..480719a0834b7 100644
--- a/plugins/database/mysql/connection_producer.go
+++ b/plugins/database/mysql/connection_producer.go
@@ -11,7 +11,6 @@ import (
"time"
"github.com/go-sql-driver/mysql"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
@@ -88,7 +87,7 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte
c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw)
if err != nil {
- return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err)
+ return nil, fmt.Errorf("invalid max_connection_lifetime: %w", err)
}
tlsConfig, err := c.getTLSAuth()
@@ -113,11 +112,11 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte
if verifyConnection {
if _, err := c.Connection(ctx); err != nil {
- return nil, errwrap.Wrapf("error verifying connection: {{err}}", err)
+ return nil, fmt.Errorf("error verifying connection: %w", err)
}
if err := c.db.PingContext(ctx); err != nil {
- return nil, errwrap.Wrapf("error verifying connection: {{err}}", err)
+ return nil, fmt.Errorf("error verifying connection: %w", err)
}
}
diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go
index fcba13249300f..ac0219b948bbb 100644
--- a/plugins/database/postgresql/postgresql.go
+++ b/plugins/database/postgresql/postgresql.go
@@ -7,7 +7,6 @@ import (
"regexp"
"strings"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
@@ -445,10 +444,10 @@ func (p *PostgreSQL) defaultDeleteUser(ctx context.Context, username string) err
// can't drop if not all privileges are revoked
if rows.Err() != nil {
- return errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err())
+ return fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err())
}
if lastStmtError != nil {
- return errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError)
+ return fmt.Errorf("could not perform all revocation statements: %w", lastStmtError)
}
// Drop this user
diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go
index e8262e7cbbe2f..6cd96bf5df8e1 100644
--- a/plugins/database/redshift/redshift.go
+++ b/plugins/database/redshift/redshift.go
@@ -7,7 +7,6 @@ import (
"fmt"
"strings"
- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
@@ -446,10 +445,10 @@ $$;`)
// can't drop if not all privileges are revoked
if rows.Err() != nil {
- return dbplugin.DeleteUserResponse{}, errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err())
+ return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err())
}
if lastStmtError != nil {
- return dbplugin.DeleteUserResponse{}, errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError)
+ return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not perform all revocation statements: %w", lastStmtError)
}
// Drop this user
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 53fedea69491c..73e6c71b16b04 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -12,7 +12,7 @@ WORKDIR /go/src/github.com/hashicorp/vault
COPY . .
RUN make bootstrap \
- && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"
+ && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS}" VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"
# Docker Image
diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui
index 036de3315b6b1..0e5becfe942aa 100644
--- a/scripts/docker/Dockerfile.ui
+++ b/scripts/docker/Dockerfile.ui
@@ -38,7 +38,7 @@ ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH
WORKDIR /go/src/github.com/hashicorp/vault
COPY . .
RUN make bootstrap static-dist \
- && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS='$BUILD_TAGS ui' VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"
+ && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS} ui" VAULT_DEV_BUILD=1 XC_OSARCH='linux/amd64' sh -c "'./scripts/build.sh'"
# Docker Image
diff --git a/sdk/database/dbplugin/v5/testing/test_helpers.go b/sdk/database/dbplugin/v5/testing/test_helpers.go
index 200a603649a33..a5b4b337e5f3c 100644
--- a/sdk/database/dbplugin/v5/testing/test_helpers.go
+++ b/sdk/database/dbplugin/v5/testing/test_helpers.go
@@ -12,7 +12,7 @@ import (
func getRequestTimeout(t *testing.T) time.Duration {
rawDur := os.Getenv("VAULT_TEST_DATABASE_REQUEST_TIMEOUT")
if rawDur == "" {
- return 2 * time.Second
+ return 5 * time.Second
}
dur, err := time.ParseDuration(rawDur)
diff --git a/sdk/framework/lease.go b/sdk/framework/lease.go
index f5c68b841d0f9..4d0240fbe7fd8 100644
--- a/sdk/framework/lease.go
+++ b/sdk/framework/lease.go
@@ -91,7 +91,7 @@ func CalculateTTL(sysView logical.SystemView, increment, backendTTL, period, bac
// If we are past the max TTL, we shouldn't be in this function...but
// fast path out if we are
- if maxValidTTL < 0 {
+ if maxValidTTL <= 0 {
return 0, nil, fmt.Errorf("past the max TTL, cannot renew")
}
diff --git a/sdk/helper/awsutil/generate_credentials.go b/sdk/helper/awsutil/generate_credentials.go
index 1ff60d696bf44..a4aa92f0bf00d 100644
--- a/sdk/helper/awsutil/generate_credentials.go
+++ b/sdk/helper/awsutil/generate_credentials.go
@@ -1,7 +1,10 @@
package awsutil
import (
+ "encoding/base64"
+ "encoding/json"
"fmt"
+ "io/ioutil"
"net/http"
"os"
"time"
@@ -10,12 +13,15 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-hclog"
"github.com/pkg/errors"
)
+const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID"
+
type CredentialsConfig struct {
// The access key if static credentials are being used
AccessKey string
@@ -132,3 +138,87 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials,
return creds, nil
}
+
+func RetrieveCreds(accessKey, secretKey, sessionToken string, logger hclog.Logger) (*credentials.Credentials, error) {
+ credConfig := CredentialsConfig{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ SessionToken: sessionToken,
+ Logger: logger,
+ }
+ creds, err := credConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+ if creds == nil {
+ return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
+ }
+
+ _, err = creds.Get()
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve credentials from credential chain: %w", err)
+ }
+ return creds, nil
+}
+
+// GenerateLoginData populates the necessary data to send to the Vault server for generating a token
+// This is useful for other API clients to use
+func GenerateLoginData(creds *credentials.Credentials, headerValue, configuredRegion string, logger hclog.Logger) (map[string]interface{}, error) {
+ loginData := make(map[string]interface{})
+
+ // Use the credentials we've found to construct an STS session
+ region, err := GetRegion(configuredRegion)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("defaulting region to %q due to %s", DefaultRegion, err.Error()))
+ region = DefaultRegion
+ }
+ stsSession, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Credentials: creds,
+ Region: ®ion,
+ EndpointResolver: endpoints.ResolverFunc(stsSigningResolver),
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var params *sts.GetCallerIdentityInput
+ svc := sts.New(stsSession)
+ stsRequest, _ := svc.GetCallerIdentityRequest(params)
+
+ // Inject the required auth header value, if supplied, and then sign the request including that header
+ if headerValue != "" {
+ stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
+ }
+ stsRequest.Sign()
+
+ // Now extract out the relevant parts of the request
+ headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
+ if err != nil {
+ return nil, err
+ }
+ requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+ loginData["iam_http_request_method"] = stsRequest.HTTPRequest.Method
+ loginData["iam_request_url"] = base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
+ loginData["iam_request_headers"] = base64.StdEncoding.EncodeToString(headersJson)
+ loginData["iam_request_body"] = base64.StdEncoding.EncodeToString(requestBody)
+
+ return loginData, nil
+}
+
+// STS is a really weird service that used to only have global endpoints but now has regional endpoints as well.
+// For backwards compatibility, even if you request a region other than us-east-1, it'll still sign for us-east-1.
+// See, e.g., https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code
+// So we have to shim in this EndpointResolver to force it to sign for the right region
+func stsSigningResolver(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+ defaultEndpoint, err := endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+ if err != nil {
+ return defaultEndpoint, err
+ }
+ defaultEndpoint.SigningRegion = region
+ return defaultEndpoint, nil
+}
diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go
index 29f9748344524..19a3f73c51899 100644
--- a/sdk/helper/tokenutil/tokenutil.go
+++ b/sdk/helper/tokenutil/tokenutil.go
@@ -207,6 +207,13 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD
t.TokenType = tokenType
}
+ if tokenNumUses, ok := d.GetOk("token_num_uses"); ok {
+ t.TokenNumUses = tokenNumUses.(int)
+ }
+ if t.TokenNumUses < 0 {
+ return errors.New("'token_num_uses' cannot be negative")
+ }
+
if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch {
if t.TokenPeriod != 0 {
return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens")
@@ -226,13 +233,6 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD
return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'")
}
- if tokenNumUses, ok := d.GetOk("token_num_uses"); ok {
- t.TokenNumUses = tokenNumUses.(int)
- }
- if t.TokenNumUses < 0 {
- return errors.New("'token_num_uses' cannot be negative")
- }
-
return nil
}
diff --git a/ui/app/adapters/secret-v2-version.js b/ui/app/adapters/secret-v2-version.js
index 5aac49d731d96..5bdd02129566a 100644
--- a/ui/app/adapters/secret-v2-version.js
+++ b/ui/app/adapters/secret-v2-version.js
@@ -70,14 +70,27 @@ export default ApplicationAdapter.extend({
v2DeleteOperation(store, id, deleteType = 'delete') {
let [backend, path, version] = JSON.parse(id);
-
- // deleteType should be 'delete', 'destroy', 'undelete'
- return this.ajax(this._url(backend, path, deleteType), 'POST', { data: { versions: [version] } }).then(
- () => {
- let model = store.peekRecord('secret-v2-version', id);
- return model && model.rollbackAttributes() && model.reload();
- }
- );
+ // deleteType should be 'delete', 'destroy', 'undelete', 'delete-latest-version', 'destroy-version'
+ if ((!version && deleteType === 'delete') || deleteType === 'delete-latest-version') {
+ return this.ajax(this._url(backend, path, 'data'), 'DELETE')
+ .then(() => {
+ let model = store.peekRecord('secret-v2-version', id);
+ return model && model.rollbackAttributes() && model.reload();
+ })
+ .catch(e => {
+ return e;
+ });
+ } else {
+ return this.ajax(this._url(backend, path, deleteType), 'POST', { data: { versions: [version] } })
+ .then(() => {
+ let model = store.peekRecord('secret-v2-version', id);
+ // potential that model.reload() is never called.
+ return model && model.rollbackAttributes() && model.reload();
+ })
+ .catch(e => {
+ return e;
+ });
+ }
},
handleResponse(status, headers, payload, requestData) {
diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js
index 03f5fa9d6046c..7c345524e87cf 100644
--- a/ui/app/components/auth-form.js
+++ b/ui/app/components/auth-form.js
@@ -128,14 +128,14 @@ export default Component.extend(DEFAULTS, {
}
),
- providerPartialName: computed('selectedAuthBackend.type', function() {
+ providerName: computed('selectedAuthBackend.type', function() {
if (!this.selectedAuthBackend) {
return;
}
let type = this.selectedAuthBackend.type || 'token';
type = type.toLowerCase();
let templateName = dasherize(type);
- return `partials/auth-form/${templateName}`;
+ return templateName;
}),
hasCSPError: alias('csp.connectionViolations.firstObject'),
diff --git a/ui/app/components/auth-info.js b/ui/app/components/auth-info.js
index 76a2c0e5e278b..9f77ebc28d9a0 100644
--- a/ui/app/components/auth-info.js
+++ b/ui/app/components/auth-info.js
@@ -1,42 +1,54 @@
+import Component from '@glimmer/component';
import { inject as service } from '@ember/service';
-import { or, alias } from '@ember/object/computed';
-import Component from '@ember/component';
import { run } from '@ember/runloop';
-
-export default Component.extend({
- auth: service(),
- wizard: service(),
- router: service(),
- version: service(),
-
- transitionToRoute: function() {
+import { action } from '@ember/object';
+import { tracked } from '@glimmer/tracking';
+
+/**
+ * @module AuthInfo
+ *
+ * @example
+ * ```js
+ *
{{@attr.options.subText}} {{#if @attr.options.docLink}}See our documentation for help.{{/if}}
+ {{/if}} +