diff --git a/.circleci/config.yml b/.circleci/config.yml
index c160da9fc64e5..ecfc8f3952304 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -86,7 +86,7 @@ jobs:
           mkdir ./pkg

           # Build dev binary
-          make bootstrap dev
+          make ci-bootstrap dev
         name: Build dev binary
     - persist_to_workspace:
         paths:
@@ -517,7 +517,7 @@ workflows:
 #           mkdir ./pkg
 #
 #           # Build dev binary
-#           make bootstrap dev
+#           make ci-bootstrap dev
 #         name: Build dev binary
 #     - persist_to_workspace:
 #         paths:
diff --git a/.circleci/config/jobs/build-go-dev.yml b/.circleci/config/jobs/build-go-dev.yml
index 3193d0be6d308..5d956f86781a2 100644
--- a/.circleci/config/jobs/build-go-dev.yml
+++ b/.circleci/config/jobs/build-go-dev.yml
@@ -12,7 +12,7 @@ steps:
       mkdir ./pkg

       # Build dev binary
-      make bootstrap dev
+      make ci-bootstrap dev
 - persist_to_workspace:
     root: .
     paths:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e0d5917f7c687..43c4ff7aad0f5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,11 +3,16 @@ CHANGES:

 * token: Token renewals will now return token policies within the `token_policies` , identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
+* kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]

 IMPROVEMENTS:

 * secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]

+BUG FIXES:
+
+* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
+
 ## 1.4.1 (TBD)

 CHANGES:
@@ -20,10 +25,16 @@ IMPROVEMENTS:

 BUG FIXES:

-* config/seal: Fix segfault when seal block is removed[[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
+* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
+* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
+* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
+* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
 * core: Fix blocked requests if a SIGHUP is issued during a long-running request has the state lock held. Also fixes deadlock that can happen if `vault debug` with the config target is ran during this time.
   [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
+* http: Fix superfluous call messages from the http package in logs, caused by missing returns after
+  `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
+* raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)]
 * sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]

 ## 1.4.0 (April 7th, 2020)
@@ -176,7 +187,7 @@ IMPROVEMENTS:

 BUG FIXES:

 * auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)]
-* auth/ldap: Fix renewal of tokens without cofigured policies that are
+* auth/ldap: Fix renewal of tokens without configured policies that are
   generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)]
 * auth/okta: Fix renewal of tokens without configured policies that are
   generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)]
diff --git a/Makefile b/Makefile
index 1304fc82f4486..45e017dbeecc5 100644
--- a/Makefile
+++ b/Makefile
@@ -9,12 +9,12 @@ TEST_TIMEOUT?=45m
 EXTENDED_TEST_TIMEOUT=60m
 INTEG_TEST_TIMEOUT=120m
 VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
-EXTERNAL_TOOLS=\
-	golang.org/x/tools/cmd/goimports \
+EXTERNAL_TOOLS_CI=\
 	github.com/elazarl/go-bindata-assetfs/... \
 	github.com/hashicorp/go-bindata/... \
-	github.com/mitchellh/gox \
-	github.com/kardianos/govendor \
+	github.com/mitchellh/gox
+EXTERNAL_TOOLS=\
+	golang.org/x/tools/cmd/goimports \
 	github.com/client9/misspell/cmd/misspell \
 	github.com/golangci/golangci-lint/cmd/golangci-lint
 GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor)
@@ -126,8 +126,15 @@ ci-config:
 ci-verify:
 	@$(MAKE) -C .circleci ci-verify

-# bootstrap the build by downloading additional tools
-bootstrap:
+# bootstrap the build by downloading additional tools needed to build
+ci-bootstrap:
+	@for tool in $(EXTERNAL_TOOLS_CI) ; do \
+		echo "Installing/Updating $$tool" ; \
+		GO111MODULE=off $(GO_CMD) get -u $$tool; \
+	done
+
+# bootstrap the build by downloading additional tools that may be used by devs
+bootstrap: ci-bootstrap
 	@for tool in $(EXTERNAL_TOOLS) ; do \
 		echo "Installing/Updating $$tool" ; \
 		GO111MODULE=off $(GO_CMD) get -u $$tool; \
@@ -274,6 +281,6 @@ publish-commit:
 	@[ -n "$(PUBLISH_VERSION)" ] || { echo "You must set PUBLISH_VERSION to the version in semver-like format."; exit 1; }
 	set -x; $(GPG_KEY_VARS) && git commit --allow-empty --gpg-sign=$$GIT_GPG_KEY_ID -m 'release: publish v$(PUBLISH_VERSION)'

-.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack stage-commit publish-commit
+.PHONY: bin default prep test vet ci-bootstrap bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin static-assets ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path check-browserstack-creds test-ui-browserstack stage-commit publish-commit

 .NOTPARALLEL: ember-dist ember-dist-dev static-assets
diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go
index 6d75a61883abe..441443cf3a31c 100644
--- a/builtin/credential/okta/backend.go
+++ b/builtin/credential/okta/backend.go
@@ -100,7 +100,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
 		StateToken string `json:"stateToken"`
 	}

-	authReq, err := shim.NewRequest("POST", "/api/v1/authn", map[string]interface{}{
+	authReq, err := shim.NewRequest("POST", "authn", map[string]interface{}{
 		"username": username,
 		"password": password,
 	})
diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go
index 9a6b20d9e130f..0794ea9081fc5 100644
--- a/builtin/credential/okta/backend_test.go
+++ b/builtin/credential/okta/backend_test.go
@@ -15,6 +15,15 @@ import (
 	"github.com/hashicorp/vault/sdk/logical"
 )

+// To run this test, set the following environment variables:
+// VAULT_ACC=1
+// OKTA_ORG=dev-219337
+// OKTA_API_TOKEN=
+// OKTA_USERNAME=test2@example.com
+// OKTA_PASSWORD=
+//
+// You will need to install the Okta client app on your mobile device and
+// set up MFA.
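+//
+// With those set, one plausible way to run this package's acceptance tests
+// (the token and password values below are placeholders):
+//
+//   VAULT_ACC=1 OKTA_ORG=dev-219337 OKTA_API_TOKEN=<token> \
+//   OKTA_USERNAME=test2@example.com OKTA_PASSWORD=<password> \
+//   go test -v ./builtin/credential/okta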
 func TestBackend_Config(t *testing.T) {
 	defaultLeaseTTLVal := time.Hour * 12
 	maxLeaseTTLVal := time.Hour * 24
diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go
index 32d8b3d959f65..7faede370a897 100644
--- a/builtin/credential/okta/path_config.go
+++ b/builtin/credential/okta/path_config.go
@@ -6,6 +6,7 @@ import (
 	"github.com/hashicorp/go-cleanhttp"
 	"net/http"
 	"net/url"
+	"strings"
 	"time"

 	oktaold "github.com/chrismalek/oktasdk-go/okta"
@@ -282,6 +283,9 @@ func (new *oktaShimNew) Client() *oktanew.Client {
 }

 func (new *oktaShimNew) NewRequest(method string, url string, body interface{}) (*http.Request, error) {
+	if !strings.HasPrefix(url, "/") {
+		url = "/api/v1/" + url
+	}
 	return new.client.GetRequestExecutor().NewRequest(method, url, body)
 }
diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go
index 1c43e97d2d7fa..c6b0514efd517 100644
--- a/builtin/credential/userpass/path_users.go
+++ b/builtin/credential/userpass/path_users.go
@@ -231,7 +231,7 @@ func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d
 			return logical.ErrorResponse(err.Error()), nil
 		}

-		if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cirs", &userEntry.BoundCIDRs, &userEntry.TokenBoundCIDRs); err != nil {
+		if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cidrs", &userEntry.BoundCIDRs, &userEntry.TokenBoundCIDRs); err != nil {
 			return logical.ErrorResponse(err.Error()), nil
 		}
 	}
diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go
index 804a98d35bece..138890d9642b2 100644
--- a/builtin/logical/database/backend.go
+++ b/builtin/logical/database/backend.go
@@ -6,6 +6,7 @@ import (
 	"net/rpc"
 	"strings"
 	"sync"
+	"time"

 	log "github.com/hashicorp/go-hclog"

@@ -24,6 +25,7 @@ const (
 	databaseConfigPath     = "database/config/"
 	databaseRolePath       = "role/"
 	databaseStaticRolePath = "static-role/"
+	minRootCredRollbackAge = 1 * time.Minute
 )

 type dbPluginInstance struct {
@@ -93,9 +95,11 @@ func Backend(conf *logical.BackendConfig) *databaseBackend {
 		Secrets: []*framework.Secret{
 			secretCreds(&b),
 		},
-		Clean:       b.clean,
-		Invalidate:  b.invalidate,
-		BackendType: logical.TypeLogical,
+		Clean:             b.clean,
+		Invalidate:        b.invalidate,
+		WALRollback:       b.walRollback,
+		WALRollbackMinAge: minRootCredRollbackAge,
+		BackendType:       logical.TypeLogical,
 	}

 	b.logger = conf.Logger
@@ -223,6 +227,15 @@ func (b *databaseBackend) invalidate(ctx context.Context, key string) {
 }

 func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, name string) (*dbPluginInstance, error) {
+	config, err := b.DatabaseConfig(ctx, s, name)
+	if err != nil {
+		return nil, err
+	}
+
+	return b.GetConnectionWithConfig(ctx, name, config)
+}
+
+func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name string, config *DatabaseConfig) (*dbPluginInstance, error) {
 	b.RLock()
 	unlockFunc := b.RUnlock
 	defer func() { unlockFunc() }()
@@ -242,11 +255,6 @@ func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage,
 		return db, nil
 	}

-	config, err := b.DatabaseConfig(ctx, s, name)
-	if err != nil {
-		return nil, err
-	}
-
 	dbp, err := dbplugin.PluginFactory(ctx, config.PluginName, b.System(), b.logger)
 	if err != nil {
 		return nil, err
diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go
index 642a007f74820..897d114fdd6cc 100644
--- a/builtin/logical/database/backend_test.go
+++ b/builtin/logical/database/backend_test.go
@@ -71,7 +71,7 @@ func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Bac
 		})
 		if err != nil || (resp != nil && resp.IsError()) {
 			// It's likely not up and running yet, so return error and try again
-			return fmt.Errorf("err:%#v resp:%#v", err, resp)
+			return fmt.Errorf("err:%#v resp:%+v", err, resp)
 		}
 		if resp == nil {
 			t.Fatal("expected warning")
 		}
diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go
index 5955860e16bb1..0cc913941fd5d 100644
--- a/builtin/logical/database/path_rotate_credentials.go
+++ b/builtin/logical/database/path_rotate_credentials.go
@@ -5,6 +5,10 @@ import (
 	"fmt"
 	"time"

+	"github.com/hashicorp/vault/sdk/database/dbplugin"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/hashicorp/vault/sdk/queue"
@@ -72,6 +76,16 @@ func (b *databaseBackend) pathRotateCredentialsUpdate() framework.OperationFunc
 			return nil, err
 		}

+		defer func() {
+			// Close the plugin
+			db.closed = true
+			if err := db.Database.Close(); err != nil {
+				b.Logger().Error("error closing the database plugin connection", "err", err)
+			}
+			// Even on error, still remove the connection
+			delete(b.connections, name)
+		}()
+
 		// Take out the backend lock since we are swapping out the connection
 		b.Lock()
 		defer b.Unlock()
@@ -80,12 +94,44 @@ func (b *databaseBackend) pathRotateCredentialsUpdate() framework.OperationFunc
 		db.Lock()
 		defer db.Unlock()

-		connectionDetails, err := db.RotateRootCredentials(ctx, config.RootCredentialsRotateStatements)
+		// Generate new credentials
+		userName := config.ConnectionDetails["username"].(string)
+		oldPassword := config.ConnectionDetails["password"].(string)
+		newPassword, err := db.GenerateCredentials(ctx)
+		if err != nil {
+			return nil, err
+		}
+		config.ConnectionDetails["password"] = newPassword
+
+		// Write a WAL entry
+		walID, err := framework.PutWAL(ctx, req.Storage, rotateRootWALKey, &rotateRootCredentialsWAL{
+			ConnectionName: name,
+			UserName:       userName,
+			OldPassword:    oldPassword,
+			NewPassword:    newPassword,
+		})
 		if err != nil {
 			return nil, err
 		}

-		config.ConnectionDetails = connectionDetails
+		// Attempt to use SetCredentials for the root credential rotation
+		statements := dbplugin.Statements{Rotation: config.RootCredentialsRotateStatements}
+		userConfig := dbplugin.StaticUserConfig{
+			Username: userName,
+			Password: newPassword,
+		}
+		if _, _, err := db.SetCredentials(ctx, statements, userConfig); err != nil {
+			if status.Code(err) == codes.Unimplemented {
+				// Fall back to using RotateRootCredentials if unimplemented
+				config.ConnectionDetails, err = db.RotateRootCredentials(ctx,
+					config.RootCredentialsRotateStatements)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Update storage with the new root credentials
 		entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config)
 		if err != nil {
 			return nil, err
@@ -94,17 +140,15 @@ func (b *databaseBackend) pathRotateCredentialsUpdate() framework.OperationFunc
 			return nil, err
 		}

-		// Close the plugin
-		db.closed = true
-		if err := db.Database.Close(); err != nil {
-			b.Logger().Error("error closing the database plugin connection", "err", err)
+		// Delete the WAL entry after successfully rotating root credentials
+		if err := framework.DeleteWAL(ctx, req.Storage, walID); err != nil {
+			b.Logger().Warn("unable to delete WAL", "error", err, "WAL ID", walID)
 		}
-		// Even on error, still remove the connection
-		delete(b.connections, name)

 		return nil, nil
 	}
 }
+
 func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc {
 	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
 		name := data.Get("name").(string)
diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go
new file mode 100644
index 0000000000000..30e02b250d722
--- /dev/null
+++ b/builtin/logical/database/rollback.go
@@ -0,0 +1,112 @@
+package database
+
+import (
+	"context"
+	"errors"
+
+	"github.com/hashicorp/vault/sdk/database/dbplugin"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// WAL storage key used for the rollback of root database credentials
+const rotateRootWALKey = "rotateRootWALKey"
+
+// WAL entry used for the rollback of root database credentials
+type rotateRootCredentialsWAL struct {
+	ConnectionName string
+	UserName       string
+	NewPassword    string
+	OldPassword    string
+}
+
+// walRollback handles WAL entries that result from partial failures
+// to rotate the root credentials of a database. It is responsible
+// for rolling back root database credentials when doing so would
+// reconcile the credentials with Vault storage.
+func (b *databaseBackend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
+	if kind != rotateRootWALKey {
+		return errors.New("unknown type to rollback")
+	}
+
+	// Decode the WAL data
+	var entry rotateRootCredentialsWAL
+	if err := mapstructure.Decode(data, &entry); err != nil {
+		return err
+	}
+
+	// Get the current database configuration from storage
+	config, err := b.DatabaseConfig(ctx, req.Storage, entry.ConnectionName)
+	if err != nil {
+		return err
+	}
+
+	// The password in storage doesn't match the new password
+	// in the WAL entry. This means there was a partial failure
+	// to update either the database or storage.
+	if config.ConnectionDetails["password"] != entry.NewPassword {
+		// Clear any cached connection to inform the rollback decision
+		err := b.ClearConnection(entry.ConnectionName)
+		if err != nil {
+			return err
+		}
+
+		// Attempt to get a connection with the current configuration.
+		// If successful, the WAL entry can be deleted. This means
+		// the root credentials are the same according to the database
+		// and storage.
+		_, err = b.GetConnection(ctx, req.Storage, entry.ConnectionName)
+		if err == nil {
+			return nil
+		}
+
+		return b.rollbackDatabaseCredentials(ctx, config, entry)
+	}
+
+	// The password in storage matches the new password in
+	// the WAL entry, so there is nothing to roll back. This
+	// means the new password was successfully updated in the
+	// database and storage, but the WAL wasn't deleted.
+	return nil
+}
+
+// rollbackDatabaseCredentials rolls back root database credentials for
+// the connection associated with the passed WAL entry. It creates
+// a connection to the database using the WAL entry's new password in
+// order to alter the password back to the WAL entry's old password.
+func (b *databaseBackend) rollbackDatabaseCredentials(ctx context.Context, config *DatabaseConfig, entry rotateRootCredentialsWAL) error {
+	// Attempt to get a connection with the WAL entry new password.
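+	// If the earlier rotation updated the password on the database but
+	// failed before it was written to storage, only the new password will
+	// still authenticate, so the rollback connection must be made with it.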
+	config.ConnectionDetails["password"] = entry.NewPassword
+	dbc, err := b.GetConnectionWithConfig(ctx, entry.ConnectionName, config)
+	if err != nil {
+		return err
+	}
+
+	// Ensure the connection used to roll back the database password is not cached
+	defer func() {
+		if err := b.ClearConnection(entry.ConnectionName); err != nil {
+			b.Logger().Error("error closing database plugin connection", "err", err)
+		}
+	}()
+
+	// Roll back the database password to the WAL entry old password
+	statements := dbplugin.Statements{Rotation: config.RootCredentialsRotateStatements}
+	userConfig := dbplugin.StaticUserConfig{
+		Username: entry.UserName,
+		Password: entry.OldPassword,
+	}
+	if _, _, err := dbc.SetCredentials(ctx, statements, userConfig); err != nil {
+		// If the database plugin doesn't implement SetCredentials, the root
+		// credentials can't be rolled back. This means the root credential
+		// rotation happened via the plugin RotateRootCredentials RPC.
+		if status.Code(err) == codes.Unimplemented {
+			return nil
+		}
+
+		return err
+	}
+
+	return nil
+}
diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go
new file mode 100644
index 0000000000000..9ce06d2d07c74
--- /dev/null
+++ b/builtin/logical/database/rollback_test.go
@@ -0,0 +1,411 @@
+package database
+
+import (
+	"context"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/database/dbplugin"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+	"strings"
+	"testing"
+)
+
+const (
+	databaseUser    = "postgres"
+	defaultPassword = "secret"
+)
+
+// Tests that the WAL rollback function rolls back the database password.
+// The database password should be rolled back when:
+// - A WAL entry exists
+// - Password has been altered on the database
+// - Password has not been updated in storage
+func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dbBackend, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to db backend")
+	}
+	defer lb.Cleanup(context.Background())
+
+	cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, lb)
+	defer cleanup()
+
+	connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1)
+
+	// Configure a connection to the database
+	data := map[string]interface{}{
+		"connection_url": connURL,
+		"plugin_name":    "postgresql-database-plugin",
+		"allowed_roles":  []string{"plugin-role-test"},
+		"username":       databaseUser,
+		"password":       defaultPassword,
+	}
+	resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Create a role
+	data = map[string]interface{}{
+		"db_name":             "plugin-test",
+		"creation_statements": testRole,
+		"max_ttl":             "10m",
+	}
+	resp, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Read credentials to verify this initially works
+	credReq := &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "creds/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      make(map[string]interface{}),
+	}
+	credResp, err := lb.HandleRequest(context.Background(), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+
+	// Get a connection to the database plugin
+	pc, err := dbBackend.GetConnection(context.Background(),
+		config.StorageView, "plugin-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Alter the database password so it no longer matches what is in storage
+	_, _, err = pc.SetCredentials(context.Background(), dbplugin.Statements{},
+		dbplugin.StaticUserConfig{
+			Username: databaseUser,
+			Password: "newSecret",
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Clear the plugin connection to verify we're no longer able to connect
+	err = dbBackend.ClearConnection("plugin-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Reading credentials should no longer work
+	credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq)
+	if err == nil {
+		t.Fatalf("expected authentication to fail when reading credentials")
+	}
+
+	// Put a WAL entry that will be used for rolling back the database password
+	walEntry := &rotateRootCredentialsWAL{
+		ConnectionName: "plugin-test",
+		UserName:       databaseUser,
+		OldPassword:    defaultPassword,
+		NewPassword:    "newSecret",
+	}
+	_, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 1, rotateRootWALKey)
+
+	// Trigger an immediate RollbackOperation so that the WAL rollback
+	// function can use the WAL entry to roll back the database password
+	_, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.RollbackOperation,
+		Path:      "",
+		Storage:   config.StorageView,
+		Data: map[string]interface{}{
+			"immediate": true,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 0, rotateRootWALKey)
+
+	// Reading credentials should work again after the database
+	// password has been rolled back.
+	credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+}
+
+// Tests that the WAL rollback function does not roll back the database password.
+// The database password should not be rolled back when:
+// - A WAL entry exists
+// - Password has not been altered on the database
+// - Password has not been updated in storage
+func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer lb.Cleanup(context.Background())
+
+	cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, lb)
+	defer cleanup()
+
+	connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1)
+
+	// Configure a connection to the database
+	data := map[string]interface{}{
+		"connection_url": connURL,
+		"plugin_name":    "postgresql-database-plugin",
+		"allowed_roles":  []string{"plugin-role-test"},
+		"username":       databaseUser,
+		"password":       defaultPassword,
+	}
+	resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Create a role
+	data = map[string]interface{}{
+		"db_name":             "plugin-test",
+		"creation_statements": testRole,
+		"max_ttl":             "10m",
+	}
+	resp, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Read credentials to verify this initially works
+	credReq := &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "creds/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      make(map[string]interface{}),
+	}
+	credResp, err := lb.HandleRequest(context.Background(), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+
+	// Put a WAL entry
+	walEntry := &rotateRootCredentialsWAL{
+		ConnectionName: "plugin-test",
+		UserName:       databaseUser,
+		OldPassword:    defaultPassword,
+		NewPassword:    "newSecret",
+	}
+	_, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 1, rotateRootWALKey)
+
+	// Trigger an immediate RollbackOperation
+	_, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.RollbackOperation,
+		Path:      "",
+		Storage:   config.StorageView,
+		Data: map[string]interface{}{
+			"immediate": true,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 0, rotateRootWALKey)
+
+	// Reading credentials should work
+	credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+}
+
+// Tests that the WAL rollback function does not roll back the database password.
+// The database password should not be rolled back when:
+// - A WAL entry exists
+// - Password has been altered on the database
+// - Password has been updated in storage
+func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dbBackend, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to db backend")
+	}
+	defer lb.Cleanup(context.Background())
+
+	cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, lb)
+	defer cleanup()
+
+	connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1)
+
+	// Configure a connection to the database
+	data := map[string]interface{}{
+		"connection_url": connURL,
+		"plugin_name":    "postgresql-database-plugin",
+		"allowed_roles":  []string{"plugin-role-test"},
+		"username":       databaseUser,
+		"password":       defaultPassword,
+	}
+	resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Create a role
+	data = map[string]interface{}{
+		"db_name":             "plugin-test",
+		"creation_statements": testRole,
+		"max_ttl":             "10m",
+	}
+	resp, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Read credentials to verify this initially works
+	credReq := &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "creds/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      make(map[string]interface{}),
+	}
+	credResp, err := lb.HandleRequest(context.Background(), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+
+	// Get a connection to the database plugin
+	pc, err := dbBackend.GetConnection(context.Background(), config.StorageView, "plugin-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Alter the database password
+	_, _, err = pc.SetCredentials(context.Background(), dbplugin.Statements{},
+		dbplugin.StaticUserConfig{
+			Username: databaseUser,
+			Password: "newSecret",
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Update storage with the new password
+	dbConfig, err := dbBackend.DatabaseConfig(context.Background(), config.StorageView,
+		"plugin-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	dbConfig.ConnectionDetails["password"] = "newSecret"
+	entry, err := logical.StorageEntryJSON("config/plugin-test", dbConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = config.StorageView.Put(context.Background(), entry)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Clear the plugin connection to verify we can connect to the database
+	err = dbBackend.ClearConnection("plugin-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Reading credentials should work
+	credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+
+	// Put a WAL entry
+	walEntry := &rotateRootCredentialsWAL{
+		ConnectionName: "plugin-test",
+		UserName:       databaseUser,
+		OldPassword:    defaultPassword,
+		NewPassword:    "newSecret",
+	}
+	_, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 1, rotateRootWALKey)
+
+	// Trigger an immediate RollbackOperation
+	_, err = lb.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.RollbackOperation,
+		Path:      "",
+		Storage:   config.StorageView,
+		Data: map[string]interface{}{
+			"immediate": true,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertWALCount(t, config.StorageView, 0, rotateRootWALKey)
+
+	// Reading credentials should work
+	credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq)
+	if err != nil || (credResp != nil && credResp.IsError()) {
+		t.Fatalf("err:%s resp:%v\n", err, credResp)
+	}
+}
diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go
index 6283403452d21..a2e017294303e 100644
--- a/builtin/logical/database/rotation_test.go
+++ b/builtin/logical/database/rotation_test.go
@@ -477,7 +477,7 @@ func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) {
 		t.Fatalf("error with PutWAL: %s", err)
 	}

-	assertWALCount(t, config.StorageView, 1)
+	assertWALCount(t, config.StorageView, 1, staticWALKey)

 	b, err := Factory(ctx, config)
 	if err != nil {
@@ -496,7 +496,7 @@ func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) {
 		t.Fatalf("expected zero queue items, got: %d", bd.credRotationQueue.Len())
 	}

-	assertWALCount(t, config.StorageView, 0)
+	assertWALCount(t, config.StorageView, 0, staticWALKey)
 }

 // Second scenario, WAL contains a role name that does exist, but the role's
@@ -597,7 +597,7 @@ func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T)
 		t.Fatalf("error with PutWAL: %s", err)
 	}

-	assertWALCount(t, config.StorageView, 1)
+	assertWALCount(t, config.StorageView, 1, staticWALKey)

 	// Reload backend
 	lb, err = Factory(context.Background(), config)
@@ -614,7 +614,7 @@ func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T)
 	time.Sleep(time.Second * 12)

 	// PopulateQueue should have processed the entry
-	assertWALCount(t, config.StorageView, 0)
+	assertWALCount(t, config.StorageView, 0, staticWALKey)

 	// Read the role
 	data = map[string]interface{}{}
@@ -656,7 +656,7 @@ func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T)
 }

 // Helper to assert the number of WAL entries is what we expect
-func assertWALCount(t *testing.T, s logical.Storage, expected int) {
+func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) {
 	var count int
 	ctx := context.Background()
 	keys, err := framework.ListWAL(ctx, s)
@@ -671,7 +671,7 @@ func assertWALCount(t *testing.T, s logical.Storage, expected int) {
 			continue
 		}

-		if walEntry.Kind != staticWALKey {
+		if walEntry.Kind != key {
 			continue
 		}
 		count++
diff --git a/command/agent/cache/handler.go b/command/agent/cache/handler.go
index b9f935d134309..a63d32a79c493 100644
--- a/command/agent/cache/handler.go
+++ b/command/agent/cache/handler.go
@@ -40,6 +40,7 @@ func Handler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSin
 		if err != nil {
 			logger.Error("failed to read request body")
 			logical.RespondError(w, http.StatusInternalServerError, errors.New("failed to read request body"))
+			return
 		}
 		if r.Body != nil {
 			r.Body.Close()
 		}
diff --git a/command/server.go b/command/server.go
index 7a27b3da98db8..8494ec5e8c10b 100644
--- a/command/server.go
+++ b/command/server.go
@@ -1152,7 +1152,7 @@ func (c *ServerCommand) Run(args []string) int {
 		// TODO: Remove when Raft can server as the ha_storage backend.
 		// See https://github.com/hashicorp/vault/issues/8206
 		if config.HAStorage.Type == "raft" {
-			c.UI.Error("Raft cannot be used as seperate HA storage at this time")
+			c.UI.Error("Raft cannot be used as separate HA storage at this time")
 			return 1
 		}
 		factory, exists := c.PhysicalBackends[config.HAStorage.Type]
@@ -1180,6 +1180,9 @@ func (c *ServerCommand) Run(args []string) int {
 			}

 			coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
+
+			// TODO: Check for raft and disableClustering case when Raft on HA
+			// Storage support is added.
 			disableClustering = config.HAStorage.DisableClustering
 			if !disableClustering {
 				coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
@@ -1188,6 +1191,12 @@ func (c *ServerCommand) Run(args []string) int {
 		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
 			coreConfig.RedirectAddr = config.Storage.RedirectAddr
 			disableClustering = config.Storage.DisableClustering
+
+			if config.Storage.Type == "raft" && disableClustering {
+				c.UI.Error("Disable clustering cannot be set to true when Raft is the storage type")
+				return 1
+			}
+
 			if !disableClustering {
 				coreConfig.ClusterAddr = config.Storage.ClusterAddr
 			}
diff --git a/go.sum b/go.sum
index 06af0e1f304bc..eed22a25adbc8 100644
--- a/go.sum
+++ b/go.sum
@@ -150,6 +150,7 @@ github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go
index 91ec08da97057..f151508240a96 100644
--- a/helper/storagepacker/storagepacker.go
+++ b/helper/storagepacker/storagepacker.go
@@ -6,7 +6,9 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
+	"time"

+	"github.com/armon/go-metrics"
 	"github.com/golang/protobuf/proto"
 	"github.com/hashicorp/errwrap"
 	"github.com/hashicorp/go-hclog"
@@ -129,6 +131,7 @@ func (s *StoragePacker) DeleteItem(_ context.Context, itemID string) error {
 }

 func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Logger, itemIDs []string) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "delete_items"}, time.Now())
 	if len(itemIDs) == 0 {
 		return nil
 	}
@@ -226,6 +229,7 @@ func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Lo
 }

 func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "put_bucket"}, time.Now())
 	if bucket == nil {
 		return fmt.Errorf("nil bucket entry")
 	}
@@ -265,6 +269,8 @@ func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error {

 // GetItem fetches the storage entry for a given key from its corresponding
 // bucket.
 func (s *StoragePacker) GetItem(itemID string) (*Item, error) {
+	defer metrics.MeasureSince([]string{"storage_packer", "get_item"}, time.Now())
+
 	if itemID == "" {
 		return nil, fmt.Errorf("empty item ID")
 	}
@@ -292,6 +298,8 @@ func (s *StoragePacker) GetItem(itemID string) (*Item, error) {

 // PutItem stores the given item in its respective bucket
 func (s *StoragePacker) PutItem(_ context.Context, item *Item) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "put_item"}, time.Now())
+
 	if item == nil {
 		return fmt.Errorf("nil item")
 	}
diff --git a/http/logical.go b/http/logical.go
index 25572a018969a..57ec03629b4c9 100644
--- a/http/logical.go
+++ b/http/logical.go
@@ -232,6 +232,7 @@ func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Han
 		reqToken := r.Header.Get(consts.AuthHeaderName)
 		if reqToken == "" || token.Load() == "" || reqToken != token.Load() {
 			respondError(w, http.StatusForbidden, nil)
+			return
 		}

 		resp, err := raw.HandleRequest(r.Context(), req)
@@ -379,6 +380,7 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForw
 		case strings.HasPrefix(req.Path, "sys/metrics"):
 			if isStandby, _ := core.Standby(); isStandby {
 				respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly)
+				return
 			}
 		}
diff --git a/http/sys_metrics.go b/http/sys_metrics.go
index b97786b79f6e5..0e58be3ea262d 100644
--- a/http/sys_metrics.go
+++ b/http/sys_metrics.go
@@ -17,6 +17,7 @@ func handleMetricsUnauthenticated(core *vault.Core) http.Handler {
 		case "GET":
 		default:
 			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
 		}

 		// Parse form
diff --git a/http/sys_raft.go b/http/sys_raft.go
index c36f87310df58..c443a11ad2ebc 100644
--- a/http/sys_raft.go
+++ b/http/sys_raft.go
@@ -33,6 +33,7 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ

 	if req.NonVoter && !nonVotersAllowed {
 		respondError(w, http.StatusBadRequest, errors.New("non-voting nodes not allowed"))
+		return
 	}

 	var tlsConfig *tls.Config
diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go
index 22f525624f15f..7c49fbadd0546 100644
--- a/physical/raft/fsm.go
+++ b/physical/raft/fsm.go
@@ -88,6 +88,10 @@ type FSM struct {

 	storeLatestState bool
 	chunker          *raftchunking.ChunkingBatchingFSM
+
+	// testSnapshotRestoreError is used in tests to simulate an error while
+	// restoring a snapshot.
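+	// It should only ever be set from tests; Restore checks it immediately
+	// after clearing the data bucket, so tests can exercise recovery from a
+	// failure that occurs part way through a snapshot restore.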
+	testSnapshotRestoreError bool
 }

 // NewFSM constructs a FSM using the given directory
@@ -193,12 +197,12 @@ func (f *FSM) witnessIndex(i *IndexValue) {
 	}
 }

-func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configuration raft.Configuration) error {
+func (f *FSM) witnessSnapshot(metadata *raft.SnapshotMeta) error {
 	var indexBytes []byte
 	latestIndex, _ := f.LatestState()

-	latestIndex.Index = index
-	latestIndex.Term = term
+	latestIndex.Index = metadata.Index
+	latestIndex.Term = metadata.Term

 	var err error
 	indexBytes, err = proto.Marshal(latestIndex)
@@ -206,7 +210,7 @@ func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configurat
 		return err
 	}

-	protoConfig := raftConfigurationToProtoConfiguration(configurationIndex, configuration)
+	protoConfig := raftConfigurationToProtoConfiguration(metadata.ConfigurationIndex, metadata.Configuration)
 	configBytes, err := proto.Marshal(protoConfig)
 	if err != nil {
 		return err
@@ -232,8 +236,8 @@ func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configurat
 		}
 	}

-	atomic.StoreUint64(f.latestIndex, index)
-	atomic.StoreUint64(f.latestTerm, term)
+	atomic.StoreUint64(f.latestIndex, metadata.Index)
+	atomic.StoreUint64(f.latestTerm, metadata.Term)
 	f.latestConfig.Store(protoConfig)

 	return nil
@@ -241,7 +245,7 @@ func (f *FSM) witnessSnapshot(index, term, configurationIndex uint64, configurat

 // Delete deletes the given key from the bolt file.
 func (f *FSM) Delete(ctx context.Context, path string) error {
-	defer metrics.MeasureSince([]string{"raft", "delete"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "delete"}, time.Now())

 	f.l.RLock()
 	defer f.l.RUnlock()
@@ -253,7 +257,7 @@ func (f *FSM) Delete(ctx context.Context, path string) error {

 // Delete deletes the given key from the bolt file.
 func (f *FSM) DeletePrefix(ctx context.Context, prefix string) error {
-	defer metrics.MeasureSince([]string{"raft", "delete_prefix"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "delete_prefix"}, time.Now())

 	f.l.RLock()
 	defer f.l.RUnlock()
@@ -277,7 +281,9 @@ func (f *FSM) DeletePrefix(ctx context.Context, prefix string) error {

 // Get retrieves the value at the given path from the bolt file.
 func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) {
+	// TODO: Remove this outdated metric name in a future release
 	defer metrics.MeasureSince([]string{"raft", "get"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "get"}, time.Now())

 	f.l.RLock()
 	defer f.l.RUnlock()
@@ -311,7 +317,7 @@ func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) {

 // Put writes the given entry to the bolt file.
 func (f *FSM) Put(ctx context.Context, entry *physical.Entry) error {
-	defer metrics.MeasureSince([]string{"raft", "put"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "put"}, time.Now())

 	f.l.RLock()
 	defer f.l.RUnlock()
@@ -324,7 +330,9 @@ func (f *FSM) Put(ctx context.Context, entry *physical.Entry) error {

 // List retrieves the set of keys with the given prefix from the bolt file.
 func (f *FSM) List(ctx context.Context, prefix string) ([]string, error) {
+	// TODO: Remove this outdated metric name in a future release
 	defer metrics.MeasureSince([]string{"raft", "list"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "list"}, time.Now())

 	f.l.RLock()
 	defer f.l.RUnlock()
@@ -531,6 +539,8 @@ type writeErrorCloser interface {
 // (size, checksum, etc) and a second for the sink of the data. We also use a
 // proto delimited writer so we can stream proto messages to the sink.
 func (f *FSM) writeTo(ctx context.Context, metaSink writeErrorCloser, sink writeErrorCloser) {
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "write_snapshot"}, time.Now())
+
 	protoWriter := protoio.NewDelimitedWriter(sink)
 	metadataProtoWriter := protoio.NewDelimitedWriter(metaSink)

@@ -573,7 +583,9 @@ func (f *FSM) writeTo(ctx context.Context, metaSink writeErrorCloser, sink write

 // Snapshot implements the FSM interface. It returns a noop snapshot object.
 func (f *FSM) Snapshot() (raft.FSMSnapshot, error) {
-	return &noopSnapshotter{}, nil
+	return &noopSnapshotter{
+		fsm: f,
+	}, nil
 }

 // SetNoopRestore is used to disable restore operations on raft startup. Because
@@ -589,48 +601,91 @@ func (f *FSM) SetNoopRestore(enabled bool) {
 // first deletes the existing bucket to clear all existing data, then recreates
 // it so we can copy in the snapshot.
 func (f *FSM) Restore(r io.ReadCloser) error {
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "restore_snapshot"}, time.Now())
+
 	if f.noopRestore == true {
 		return nil
 	}

+	snapMeta := r.(*boltSnapshotMetadataReader).Metadata()
+
 	protoReader := protoio.NewDelimitedReader(r, math.MaxInt32)
 	defer protoReader.Close()

 	f.l.Lock()
 	defer f.l.Unlock()

-	// Start a write transaction.
+	// Delete the existing data bucket and create a new one.
+	f.logger.Debug("snapshot restore: deleting bucket")
 	err := f.db.Update(func(tx *bolt.Tx) error {
 		err := tx.DeleteBucket(dataBucketName)
 		if err != nil {
 			return err
 		}

-		b, err := tx.CreateBucket(dataBucketName)
+		_, err = tx.CreateBucket(dataBucketName)
 		if err != nil {
 			return err
 		}

-		for {
+		return nil
+	})
+	if err != nil {
+		f.logger.Error("could not restore snapshot: could not clear existing bucket", "error", err)
+		return err
+	}
+
+	// If we are testing a failed snapshot restore, error out here.
+	if f.testSnapshotRestoreError {
+		return errors.New("Test error")
+	}
+
+	f.logger.Debug("snapshot restore: deleting bucket done")
+	f.logger.Debug("snapshot restore: writing keys")
+
+	var done bool
+	var keys int
+	for !done {
+		err := f.db.Update(func(tx *bolt.Tx) error {
+			b := tx.Bucket(dataBucketName)
 			s := new(pb.StorageEntry)
+
+			// Commit in batches of 50k. Bolt holds all the data in memory and
+			// doesn't split the pages until commit, so we do incremental writes.
+			// This is safe since we hold the FSM's write lock.
+			for i := 0; i < 50000; i++ {
+				err := protoReader.ReadMsg(s)
+				if err != nil {
+					if err == io.EOF {
+						done = true
+						return nil
+					}
+					return err
+				}
-				return err
-			}

-			err = b.Put([]byte(s.Key), s.Value)
-			if err != nil {
-				return err
+				err = b.Put([]byte(s.Key), s.Value)
+				if err != nil {
+					return err
+				}
+				keys += 1
 			}
+
+			return nil
+		})
+		if err != nil {
+			f.logger.Error("could not restore snapshot", "error", err)
+			return err
 		}
-		return nil
-	})
-	if err != nil {
-		f.logger.Error("could not restore snapshot", "error", err)
+		f.logger.Trace("snapshot restore: writing keys", "num_written", keys)
+	}
+
+	f.logger.Debug("snapshot restore: writing keys done")
+
+	// Write the metadata after we have applied all the snapshot data
+	f.logger.Debug("snapshot restore: writing metadata")
+	if err := f.witnessSnapshot(snapMeta); err != nil {
+		f.logger.Error("could not write metadata", "error", err)
 		return err
 	}
@@ -639,10 +694,23 @@ func (f *FSM) Restore(r io.ReadCloser) error {

 // noopSnapshotter implements the fsm.Snapshot interface. It doesn't do anything
 // since our SnapshotStore reads data out of the FSM on Open().
-type noopSnapshotter struct{}
+type noopSnapshotter struct {
+	fsm *FSM
+}

-// Persist doesn't do anything.
+// Persist implements the fsm.Snapshot interface. It doesn't need to persist any
+// state data, but it does persist the raft metadata. This is necessary so we
+// can be sure to capture indexes for operation types that are not sent to the
+// FSM.
 func (s *noopSnapshotter) Persist(sink raft.SnapshotSink) error {
+	boltSnapshotSink := sink.(*BoltSnapshotSink)
+
+	// We are processing a snapshot, so fast-forward the index, term, and
+	// configuration to the latest seen by the raft system.
+	if err := s.fsm.witnessSnapshot(&boltSnapshotSink.meta); err != nil {
+		return err
+	}
+
 	return nil
 }
diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index 97d5c4544fb45..75f9fde8ab395 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -491,15 +491,29 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
 		return err
 	}

+	listenerIsNil := func(cl cluster.ClusterHook) bool {
+		switch {
+		case opts.ClusterListener == nil:
+			return true
+		default:
+			// Concrete type checks
+			switch cl.(type) {
+			case *cluster.Listener:
+				return cl.(*cluster.Listener) == nil
+			}
+		}
+		return false
+	}
+
 	switch {
-	case opts.TLSKeyring == nil && opts.ClusterListener == nil:
+	case opts.TLSKeyring == nil && listenerIsNil(opts.ClusterListener):
 		// If we don't have a provided network we use an in-memory one.
 		// This allows us to bootstrap a node without bringing up a cluster
 		// network. This will be true during bootstrap, tests and dev modes.
 		_, b.raftTransport = raft.NewInmemTransportWithTimeout(raft.ServerAddress(b.localID), time.Second)
 	case opts.TLSKeyring == nil:
 		return errors.New("no keyring provided")
-	case opts.ClusterListener == nil:
+	case listenerIsNil(opts.ClusterListener):
 		return errors.New("no cluster listener provided")
 	default:
 		// Set the local address and localID in the streaming layer and the raft config.
diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go
index 40faa09b18f32..930e46a40d07f 100644
--- a/physical/raft/raft_test.go
+++ b/physical/raft/raft_test.go
@@ -76,22 +76,77 @@ func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir str
 	return backend, raftDir
 }

+func connectPeers(nodes ...*RaftBackend) {
+	for _, node := range nodes {
+		for _, peer := range nodes {
+			if node == peer {
+				continue
+			}
+
+			node.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(peer.NodeID()), peer.raftTransport)
+			peer.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(node.NodeID()), node.raftTransport)
+		}
+	}
+}
+
+func stepDownLeader(t *testing.T, node *RaftBackend) {
+	t.Helper()
+
+	if err := node.raft.LeadershipTransfer().Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	timeout := time.Now().Add(time.Second * 10)
+	for !time.Now().After(timeout) {
+		if err := node.raft.VerifyLeader().Error(); err != nil {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	t.Fatal("still leader")
+}
+
+func waitForLeader(t *testing.T, nodes ...*RaftBackend) *RaftBackend {
+	t.Helper()
+	timeout := time.Now().Add(time.Second * 10)
+	for !time.Now().After(timeout) {
+		for _, node := range nodes {
+			if node.raft.Leader() == raft.ServerAddress(node.NodeID()) {
+				return node
+			}
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	t.Fatal("no leader")
+	return nil
+}
+
 func compareFSMs(t *testing.T, fsm1, fsm2 *FSM) {
+	t.Helper()
+	if err := compareFSMsWithErr(t, fsm1, fsm2); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func compareFSMsWithErr(t *testing.T, fsm1, fsm2 *FSM) error {
 	t.Helper()
 	index1, config1 := fsm1.LatestState()
 	index2, config2 := fsm2.LatestState()

 	if !proto.Equal(index1, index2) {
-		t.Fatalf("indexes did not match: %+v != %+v", index1, index2)
+		return fmt.Errorf("indexes did not match: %+v != %+v", index1, index2)
 	}
 	if !proto.Equal(config1, config2) {
-		t.Fatalf("configs did not match: %+v != %+v", config1, config2)
+		return fmt.Errorf("configs did not match: %+v != %+v", config1, config2)
 	}

-	compareDBs(t, fsm1.db, fsm2.db)
+	return compareDBs(t, fsm1.db, fsm2.db)
 }

-func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB) {
+func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB) error {
+	t.Helper()
 	db1 := make(map[string]string)
 	db2 := make(map[string]string)

@@ -135,8 +190,10 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB) {
 	}

 	if diff := deep.Equal(db1, db2); diff != nil {
-		t.Fatal(diff)
+		return fmt.Errorf("%+v", diff)
 	}
+
+	return nil
 }

 func TestRaft_Backend(t *testing.T) {
diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go
index 8538778b5d06a..7139cce7d9d7b 100644
--- a/physical/raft/snapshot.go
+++ b/physical/raft/snapshot.go
@@ -104,13 +104,6 @@ func (f *BoltSnapshotStore) Create(version raft.SnapshotVersion, index, term uin
 		return nil, fmt.Errorf("unsupported snapshot version %d", version)
 	}

-	// We are processing a snapshot, fastforward the index, term, and
-	// configuration to the latest seen by the raft system. This could include
-	// log indexes for operation types that are never sent to the FSM.
-	if err := f.fsm.witnessSnapshot(index, term, configurationIndex, configuration); err != nil {
-		return nil, err
-	}
-
 	// Create the sink
 	sink := &BoltSnapshotSink{
 		store: f,
@@ -208,6 +201,11 @@ func (f *BoltSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser,
 		if err != nil {
 			return nil, nil, err
 		}
+
+		readCloser = &boltSnapshotMetadataReader{
+			meta:       meta,
+			ReadCloser: readCloser,
+		}
 	}

 	return meta, readCloser, nil
@@ -286,3 +284,12 @@ func (s *BoltSnapshotSink) Cancel() error {

 	return nil
 }
+
+type boltSnapshotMetadataReader struct {
+	io.ReadCloser
+	meta *raft.SnapshotMeta
+}
+
+func (r *boltSnapshotMetadataReader) Metadata() *raft.SnapshotMeta {
+	return r.meta
+}
diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go
index 5851a2c0e9185..33b1d1b22e821 100644
--- a/physical/raft/snapshot_test.go
+++ b/physical/raft/snapshot_test.go
@@ -345,6 +345,88 @@ func TestRaft_Snapshot_Restart(t *testing.T) {
 	compareFSMs(t, raft1.fsm, raft2.fsm)
 }

+func TestRaft_Snapshot_ErrorRecovery(t *testing.T) {
+	raft1, dir := getRaft(t, true, false)
+	raft2, dir2 := getRaft(t, false, false)
+	raft3, dir3 := getRaft(t, false, false)
+	defer os.RemoveAll(dir)
+	defer os.RemoveAll(dir2)
+	defer os.RemoveAll(dir3)
+
+	// Add raft2 to the cluster
+	addPeer(t, raft1, raft2)
+
+	// Write some data
+	for i := 0; i < 100; i++ {
+		err := raft1.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Take a snapshot on each node to ensure we no longer have older logs
+	snapFuture := raft1.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	stepDownLeader(t, raft1)
+	leader := waitForLeader(t, raft1, raft2)
+
+	snapFuture = leader.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Advance FSM's index past snapshot index
+	leader.Put(context.Background(), &physical.Entry{
+		Key:   "key",
+		Value: []byte("value"),
+	})
+
+	// Error on snapshot restore
+	raft3.fsm.testSnapshotRestoreError = true
+
+	// Add raft3 to the cluster
+	addPeer(t, leader, raft3)
+
+	time.Sleep(2 * time.Second)
+
+	// Restart the failing node to make sure fresh state does not have invalid
+	// values.
+	if err := raft3.TeardownCluster(nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the databases are not equal
+	if err := compareFSMsWithErr(t, leader.fsm, raft3.fsm); err == nil {
+		t.Fatal("nil error")
+	}
+
+	// Remove error and make sure we can reconcile state
+	raft3.fsm.testSnapshotRestoreError = false
+
+	// Step down leader node
+	stepDownLeader(t, leader)
+	leader = waitForLeader(t, raft1, raft2)
+
+	// Start Raft3
+	if err := raft3.SetupCluster(context.Background(), SetupOpts{}); err != nil {
+		t.Fatal(err)
+	}
+
+	connectPeers(raft1, raft2, raft3)
+	waitForLeader(t, raft1, raft2)
+
+	time.Sleep(5 * time.Second)
+
+	// Make sure state gets re-replicated.
+	compareFSMs(t, raft1.fsm, raft3.fsm)
+}
+
 func TestRaft_Snapshot_Take_Restore(t *testing.T) {
 	raft1, dir := getRaft(t, true, false)
 	defer os.RemoveAll(dir)
diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go
index 6902413eea174..d0094b53b5c79 100644
--- a/plugins/database/postgresql/postgresql.go
+++ b/plugins/database/postgresql/postgresql.go
@@ -112,7 +112,7 @@ func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) {
 // Vault's storage.
 func (p *PostgreSQL) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) {
 	if len(statements.Rotation) == 0 {
-		return "", "", errors.New("empty rotation statements")
+		statements.Rotation = []string{defaultPostgresRotateRootCredentialsSQL}
 	}

 	username = staticUser.Username
diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go
index d2c688c20b926..c88fc961f7ebd 100644
--- a/plugins/database/redshift/redshift.go
+++ b/plugins/database/redshift/redshift.go
@@ -107,7 +107,7 @@ func (r *RedShift) getConnection(ctx context.Context) (*sql.DB, error) {
 // Vault's storage.
 func (r *RedShift) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) {
 	if len(statements.Rotation) == 0 {
-		return "", "", errors.New("empty rotation statements")
+		statements.Rotation = []string{defaultRotateRootCredentialsSQL}
 	}

 	username = staticUser.Username
diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go
index 19cfa3374b621..e7cb0a2f5af12 100644
--- a/sdk/database/dbplugin/databasemiddleware.go
+++ b/sdk/database/dbplugin/databasemiddleware.go
@@ -8,10 +8,10 @@ import (
 	"sync"
 	"time"

-	"github.com/hashicorp/errwrap"
-
 	metrics "github.com/armon/go-metrics"
+	"github.com/hashicorp/errwrap"
 	log "github.com/hashicorp/go-hclog"
+	"google.golang.org/grpc/status"
 )

 // ---- Tracing Middleware Domain ----
@@ -318,6 +318,15 @@ func (mw *DatabaseErrorSanitizerMiddleware) sanitize(err error) error {
 			if k == "" {
 				continue
 			}
+
+			// Attempt to keep the status code attached to the
+			// error without changing the actual error message
+			s, ok := status.FromError(err)
+			if ok {
+				err = status.Error(s.Code(), strings.Replace(s.Message(), k, v.(string), -1))
+				continue
+			}
+
 			err = errors.New(strings.Replace(err.Error(), k, v.(string), -1))
 		}
 	}
diff --git a/ui/lib/core/addon/components/form-field.js b/ui/lib/core/addon/components/form-field.js
index dc83f494d745d..cf3509d6c9c9a 100644
--- a/ui/lib/core/addon/components/form-field.js
+++ b/ui/lib/core/addon/components/form-field.js
@@ -113,6 +113,12 @@ export default Component.extend({
       this.send('setAndBroadcast', path, valueToSet);
     },

+    setAndBroadcastTtl(path, value) {
+      const alwaysSendValue = path === 'expiry' || path === 'safetyBuffer';
+      let valueToSet = value.enabled === true || alwaysSendValue ? `${value.seconds}s` : undefined;
+      this.send('setAndBroadcast', path, `${valueToSet}`);
+    },
+
     codemirrorUpdated(path, isString, value, codemirror) {
       codemirror.performLint();
       const hasErrors = codemirror.state.lint.marked.length > 0;
diff --git a/ui/lib/core/addon/components/ttl-picker2.js b/ui/lib/core/addon/components/ttl-picker2.js
index 8dae2ae1dbcab..2df8f87e89da6 100644
--- a/ui/lib/core/addon/components/ttl-picker2.js
+++ b/ui/lib/core/addon/components/ttl-picker2.js
@@ -15,12 +15,15 @@
  * @param time=30 {Number} - The time (in the default units) which will be adjustable by the user of the form
 * @param unit="s" {String} - This is the unit key which will show by default on the form. Can be one of `s` (seconds), `m` (minutes), `h` (hours), `d` (days)
  * @param recalculationTimeout=5000 {Number} - This is the time, in milliseconds, that `recalculateSeconds` will be be true after time is updated
+ * @param initialValue {String} - This is the value set initially (particularly from a string like '30h')
  */

 import Ember from 'ember';
 import Component from '@ember/component';
 import { computed } from '@ember/object';
 import { task, timeout } from 'ember-concurrency';
+import { typeOf } from '@ember/utils';
+import Duration from 'Duration.js';
 import layout from '../templates/components/ttl-picker2';

 const secondsMap = {
@@ -43,8 +46,34 @@ export default Component.extend({
   helperTextDisabled: 'Allow tokens to be used indefinitely',
   helperTextEnabled: 'Disable the use of the token after',
   time: 30,
-  unit: 'm',
+  unit: 's',
   recalculationTimeout: 5000,
+  initialValue: null,
+
+  init() {
+    this._super(...arguments);
+    const value = this.initialValue;
+    // if initial value is unset use params passed in as defaults
+    if (!value && value !== 0) {
+      return;
+    }
+
+    let seconds = 30;
+    if (typeOf(value) === 'number') {
+      seconds = value;
+    } else {
+      try {
+        seconds = Duration.parse(value).seconds();
+      } catch (e) {
+        console.error(e);
+        // if parsing fails leave as default 30
+      }
+    }
+    this.setProperties({
+      time: seconds,
+      unit: 's',
+    });
+  },

   unitOptions: computed(function() {
     return [
       { label: 'seconds', value: 's' },
@@ -53,16 +82,15 @@ export default Component.extend({
       { label: 'days', value: 'd' },
     ];
   }),
-
-  TTL: computed('enableTTL', 'seconds', function() {
+  handleChange() {
     let { time, unit, enableTTL, seconds } = this.getProperties('time', 'unit', 'enableTTL', 'seconds');
-    return {
+    const ttl = {
       enabled: enableTTL,
       seconds,
       timeString: time + unit,
     };
-  }),
-
+    this.onChange(ttl);
+  },
   updateTime: task(function*(newTime) {
     this.set('errorMessage', '');
     let parsedTime;
@@ -75,7 +103,7 @@ export default Component.extend({
       return;
     }
     this.set('time', parsedTime);
-    this.onChange(this.TTL);
+    this.handleChange();
     if (Ember.testing) {
       return;
     }
@@ -107,11 +135,11 @@ export default Component.extend({
     } else {
       this.recalculateTime(newUnit);
     }
-    this.onChange(this.TTL);
+    this.handleChange();
   },
   toggleEnabled() {
     this.toggleProperty('enableTTL');
-    this.onChange(this.TTL);
+    this.handleChange();
   },
 },
});
diff --git a/ui/lib/core/addon/templates/components/form-field.hbs b/ui/lib/core/addon/templates/components/form-field.hbs
index febcac81836c5..6a22565d3fc0e 100644
--- a/ui/lib/core/addon/templates/components/form-field.hbs
+++ b/ui/lib/core/addon/templates/components/form-field.hbs
@@ -96,14 +96,13 @@
       label=labelString
     }}
   {{else if (eq attr.options.editType "ttl")}}
-    {{ttl-picker
-      data-test-input=attr.name
-      initialValue=(or (get model valuePath) attr.options.defaultValue)
-      labelText=labelString
-      warning=attr.options.warning
-      setDefaultValue=(or (get model valuePath) attr.options.setDefault false)
-      onChange=(action (action "setAndBroadcast" valuePath))
-    }}
+
   {{else if (eq attr.options.editType "stringArray")}}
     {{string-list
       data-test-input=attr.name
diff --git a/ui/lib/core/addon/templates/components/ttl-picker2.hbs b/ui/lib/core/addon/templates/components/ttl-picker2.hbs
index e972d59ad5a2d..71660e5d10fb7 100644
--- a/ui/lib/core/addon/templates/components/ttl-picker2.hbs
+++ b/ui/lib/core/addon/templates/components/ttl-picker2.hbs
@@ -10,11 +10,11 @@
       {{helperText}}

 {{#if enableTTL}}
-
+