From 3c845deaf921608913662a8cf11cbc5a811ddce7 Mon Sep 17 00:00:00 2001 From: swayne275 Date: Tue, 11 May 2021 10:41:57 -0600 Subject: [PATCH 001/101] don't panic if no data, improve log (#11581) --- vault/activity_log.go | 2 +- vault/activity_log_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/vault/activity_log.go b/vault/activity_log.go index 03cafe1b1eac2..be161995eb224 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -1577,7 +1577,7 @@ func (a *ActivityLog) precomputedQueryWorker() error { } lastMonth := intent.PreviousMonth - a.logger.Info("computing queries", "month", lastMonth) + a.logger.Info("computing queries", "month", time.Unix(lastMonth, 0).UTC()) times, err := a.getMostRecentActivityLogSegment(ctx) if err != nil { diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go index cd7e44b0c3f01..36523e2fabef1 100644 --- a/vault/activity_log_test.go +++ b/vault/activity_log_test.go @@ -1808,6 +1808,10 @@ func TestActivityLog_EndOfMonth(t *testing.T) { if err != nil { t.Fatal(err) } + if intentRaw == nil { + t.Fatal("no intent log present") + } + var intent ActivityIntentLog err = intentRaw.DecodeJSON(&intent) if err != nil { From d21e60eaa9cb19ebcf22bb47348815ffced6b555 Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Tue, 11 May 2021 11:59:08 -0500 Subject: [PATCH 002/101] Move config validation code to it's own, non ENT specific file (#11579) * Move config validation code to it's own, non ENT specific file * Fix imports * import order --- command/server/config.go | 3 +- internalshared/configutil/config_util.go | 57 ---------------------- internalshared/configutil/lint.go | 61 ++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 59 deletions(-) create mode 100644 internalshared/configutil/lint.go diff --git a/command/server/config.go b/command/server/config.go index 744f57c035a1a..617af886ff60f 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/hashicorp/hcl/hcl/token" "io" "io/ioutil" "os" @@ -23,7 +22,7 @@ import ( // Config is the configuration for the vault server. 
type Config struct { - UnusedKeys map[string][]token.Pos `hcl:",unusedKeyPositions"` + UnusedKeys configutil.UnusedKeyMap `hcl:",unusedKeyPositions"` entConfig *configutil.SharedConfig `hcl:"-"` diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go index ca108af5ae1b6..fc527cc2af0c0 100644 --- a/internalshared/configutil/config_util.go +++ b/internalshared/configutil/config_util.go @@ -3,30 +3,11 @@ package configutil import ( - "fmt" - "github.com/asaskevich/govalidator" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/vault/sdk/helper/strutil" ) type EntSharedConfig struct{} -type UnusedKeyMap map[string][]token.Pos - -type ConfigError struct { - Problem string - Position token.Pos -} - -func (c *ConfigError) String() string { - return fmt.Sprintf("%s at %s", c.Problem, c.Position.String()) -} - -type ValidatableConfig interface { - Validate() []ConfigError -} - func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error { return nil } @@ -34,41 +15,3 @@ func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error { func ParseEntropy(result *SharedConfig, list *ast.ObjectList, blockName string) error { return nil } - -// Creates the ConfigErrors for unused fields, which occur in various structs -func ValidateUnusedFields(unusedKeyPositions UnusedKeyMap, sourceFilePath string) []ConfigError { - if unusedKeyPositions == nil { - return nil - } - var errors []ConfigError - for field, positions := range unusedKeyPositions { - problem := fmt.Sprintf("unknown field %s found in configuration", field) - for _, pos := range positions { - if pos.Filename == "" && sourceFilePath != "" { - pos.Filename = sourceFilePath - } - errors = append(errors, ConfigError{ - Problem: problem, - Position: pos, - }) - } - } - return errors -} - -// UnusedFieldDifference returns all the keys in map a that are not present in map b, and also not present in foundKeys. 
-func UnusedFieldDifference(a, b UnusedKeyMap, foundKeys []string) UnusedKeyMap { - if a == nil { - return nil - } - if b == nil { - return a - } - res := make(UnusedKeyMap) - for k, v := range a { - if _, ok := b[k]; !ok && !strutil.StrListContainsCaseInsensitive(foundKeys, govalidator.UnderscoreToCamelCase(k)) { - res[k] = v - } - } - return res -} diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go new file mode 100644 index 0000000000000..943c5287f8d38 --- /dev/null +++ b/internalshared/configutil/lint.go @@ -0,0 +1,61 @@ +package configutil + +import ( + "fmt" + "github.com/asaskevich/govalidator" + "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/vault/sdk/helper/strutil" +) + +type UnusedKeyMap map[string][]token.Pos + +type ConfigError struct { + Problem string + Position token.Pos +} + +func (c *ConfigError) String() string { + return fmt.Sprintf("%s at %s", c.Problem, c.Position.String()) +} + +type ValidatableConfig interface { + Validate() []ConfigError +} + +// Creates the ConfigErrors for unused fields, which occur in various structs +func ValidateUnusedFields(unusedKeyPositions UnusedKeyMap, sourceFilePath string) []ConfigError { + if unusedKeyPositions == nil { + return nil + } + var errors []ConfigError + for field, positions := range unusedKeyPositions { + problem := fmt.Sprintf("unknown field %s found in configuration", field) + for _, pos := range positions { + if pos.Filename == "" && sourceFilePath != "" { + pos.Filename = sourceFilePath + } + errors = append(errors, ConfigError{ + Problem: problem, + Position: pos, + }) + } + } + return errors +} + +// UnusedFieldDifference returns all the keys in map a that are not present in map b, and also not present in foundKeys. +func UnusedFieldDifference(a, b UnusedKeyMap, foundKeys []string) UnusedKeyMap { + if a == nil { + return nil + } + if b == nil { + return a + } + res := make(UnusedKeyMap) + for k, v := range a { + if _, ok := b[k]; !ok && !strutil.StrListContainsCaseInsensitive(foundKeys, govalidator.UnderscoreToCamelCase(k)) { + res[k] = v + } + } + return res +} From d10e912ec33fb7fdd09b2d01228bb7c758690d21 Mon Sep 17 00:00:00 2001 From: Lars Lehtonen Date: Tue, 11 May 2021 10:12:54 -0700 Subject: [PATCH 003/101] vault: deprecate errwrap.Wrapf() (#11577) --- vault/acl.go | 3 +- vault/audited_headers.go | 11 ++-- vault/barrier_aes_gcm.go | 51 ++++++++------- vault/cluster.go | 9 ++- vault/cluster/cluster.go | 3 +- vault/core.go | 80 ++++++++++++------------ vault/cors.go | 8 +-- vault/counters.go | 12 ++-- vault/dynamic_system_view.go | 3 +- vault/expiration.go | 54 ++++++++-------- vault/expiration_util.go | 5 +- vault/generate_root.go | 11 ++-- vault/generate_root_recovery.go | 8 +-- vault/ha.go | 17 +++-- vault/identity_store.go | 9 ++- vault/identity_store_entities.go | 5 +- vault/identity_store_groups.go | 3 +- vault/identity_store_oidc.go | 3 +- vault/identity_store_util.go | 66 ++++++++++---------- vault/init.go | 29 +++++---- vault/keyring.go | 5 +- vault/logical_cubbyhole.go | 11 ++-- vault/logical_passthrough.go | 11 ++-- vault/logical_system.go | 28 ++++----- vault/plugin_catalog.go | 19 +++--- vault/plugin_reload.go | 3 +- vault/policy.go | 17 +++-- vault/policy_store.go | 33 +++++----- vault/raft.go | 77 ++++++++++++----------- vault/rekey.go | 95 ++++++++++++++-------------- vault/request_handling.go | 6 +- vault/seal.go | 29 +++++---- vault/seal_autoseal.go | 59 +++++++++--------- vault/token_store.go | 103 +++++++++++++++---------------- vault/wrapping.go | 
13 ++-- 35 files changed, 437 insertions(+), 462 deletions(-) diff --git a/vault/acl.go b/vault/acl.go index 040258f6f3f66..78a37dd3fd750 100644 --- a/vault/acl.go +++ b/vault/acl.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/armon/go-radix" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/namespace" @@ -125,7 +124,7 @@ func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) { if !ok { clonedPerms, err := pc.Permissions.Clone() if err != nil { - return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err) + return nil, fmt.Errorf("error cloning ACL permissions: %w", err) } switch { case pc.HasSegmentWildcards: diff --git a/vault/audited_headers.go b/vault/audited_headers.go index 7abe78cd695bd..4e83d9bff4451 100644 --- a/vault/audited_headers.go +++ b/vault/audited_headers.go @@ -6,7 +6,6 @@ import ( "strings" "sync" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/logical" ) @@ -50,11 +49,11 @@ func (a *AuditedHeadersConfig) add(ctx context.Context, header string, hmac bool a.Headers[strings.ToLower(header)] = &auditedHeaderSettings{hmac} entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers) if err != nil { - return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err) + return fmt.Errorf("failed to persist audited headers config: %w", err) } if err := a.view.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err) + return fmt.Errorf("failed to persist audited headers config: %w", err) } return nil @@ -78,11 +77,11 @@ func (a *AuditedHeadersConfig) remove(ctx context.Context, header string) error delete(a.Headers, strings.ToLower(header)) entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers) if err != nil { - return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err) + return fmt.Errorf("failed to persist audited headers config: %w", err) } if err := a.view.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err) + return fmt.Errorf("failed to persist audited headers config: %w", err) } return nil @@ -135,7 +134,7 @@ func (c *Core) setupAuditedHeadersConfig(ctx context.Context) error { // Create the config out, err := view.Get(ctx, auditedHeadersEntry) if err != nil { - return errwrap.Wrapf("failed to read config: {{err}}", err) + return fmt.Errorf("failed to read config: %w", err) } headers := make(map[string]*auditedHeaderSettings) diff --git a/vault/barrier_aes_gcm.go b/vault/barrier_aes_gcm.go index edce5cef27a55..730680b8f952a 100644 --- a/vault/barrier_aes_gcm.go +++ b/vault/barrier_aes_gcm.go @@ -16,7 +16,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" @@ -132,7 +131,7 @@ func (b *AESGCMBarrier) Initialized(ctx context.Context) (bool, error) { // Read the keyring file keys, err := b.backend.List(ctx, keyringPrefix) if err != nil { - return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err) + return false, fmt.Errorf("failed to check for initialization: %w", err) } if strutil.StrListContains(keys, "keyring") { b.initialized.Store(true) @@ -142,7 +141,7 @@ func (b *AESGCMBarrier) Initialized(ctx context.Context) (bool, error) { // Fallback, check for the old sentinel file 
out, err := b.backend.Get(ctx, barrierInitPath) if err != nil { - return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err) + return false, fmt.Errorf("failed to check for initialization: %w", err) } b.initialized.Store(out != nil) return out != nil, nil @@ -167,7 +166,7 @@ func (b *AESGCMBarrier) Initialize(ctx context.Context, key, sealKey []byte, rea // Generate encryption key encrypt, err := b.GenerateKey(reader) if err != nil { - return errwrap.Wrapf("failed to generate encryption key: {{err}}", err) + return fmt.Errorf("failed to generate encryption key: %w", err) } // Create a new keyring, install the keys @@ -179,7 +178,7 @@ func (b *AESGCMBarrier) Initialize(ctx context.Context, key, sealKey []byte, rea Value: encrypt, }) if err != nil { - return errwrap.Wrapf("failed to create keyring: {{err}}", err) + return fmt.Errorf("failed to create keyring: %w", err) } err = b.persistKeyring(ctx, keyring) @@ -198,7 +197,7 @@ func (b *AESGCMBarrier) Initialize(ctx context.Context, key, sealKey []byte, rea Value: sealKey, }) if err != nil { - return errwrap.Wrapf("failed to store new seal key: {{err}}", err) + return fmt.Errorf("failed to store new seal key: %w", err) } } @@ -212,7 +211,7 @@ func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) er keyringBuf, err := keyring.Serialize() defer memzero(keyringBuf) if err != nil { - return errwrap.Wrapf("failed to serialize keyring: {{err}}", err) + return fmt.Errorf("failed to serialize keyring: %w", err) } // Create the AES-GCM @@ -233,7 +232,7 @@ func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) er Value: value, } if err := b.backend.Put(ctx, pe); err != nil { - return errwrap.Wrapf("failed to persist keyring: {{err}}", err) + return fmt.Errorf("failed to persist keyring: %w", err) } // Serialize the master key value @@ -245,7 +244,7 @@ func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) er keyBuf, err := key.Serialize() defer memzero(keyBuf) if err != nil { - return errwrap.Wrapf("failed to serialize master key: {{err}}", err) + return fmt.Errorf("failed to serialize master key: %w", err) } // Encrypt the master key @@ -265,7 +264,7 @@ func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) er Value: value, } if err := b.backend.Put(ctx, pe); err != nil { - return errwrap.Wrapf("failed to persist master key: {{err}}", err) + return fmt.Errorf("failed to persist master key: %w", err) } return nil } @@ -322,7 +321,7 @@ func (b *AESGCMBarrier) ReloadKeyring(ctx context.Context) error { // Read in the keyring out, err := b.backend.Get(ctx, keyringPath) if err != nil { - return errwrap.Wrapf("failed to check for keyring: {{err}}", err) + return fmt.Errorf("failed to check for keyring: %w", err) } // Ensure that the keyring exists. 
This should never happen, @@ -359,7 +358,7 @@ func (b *AESGCMBarrier) ReloadKeyring(ctx context.Context) error { func (b *AESGCMBarrier) recoverKeyring(plaintext []byte) error { keyring, err := DeserializeKeyring(plaintext) if err != nil { - return errwrap.Wrapf("keyring deserialization failed: {{err}}", err) + return fmt.Errorf("keyring deserialization failed: %w", err) } // Setup the keyring and finish @@ -375,7 +374,7 @@ func (b *AESGCMBarrier) ReloadMasterKey(ctx context.Context) error { // Read the masterKeyPath upgrade out, err := b.Get(ctx, masterKeyPath) if err != nil { - return errwrap.Wrapf("failed to read master key path: {{err}}", err) + return fmt.Errorf("failed to read master key path: %w", err) } // The masterKeyPath could be missing (backwards incompatible), @@ -391,7 +390,7 @@ func (b *AESGCMBarrier) ReloadMasterKey(ctx context.Context) error { out, err = b.lockSwitchedGet(ctx, masterKeyPath, false) if err != nil { - return errwrap.Wrapf("failed to read master key path: {{err}}", err) + return fmt.Errorf("failed to read master key path: %w", err) } if out == nil { @@ -402,7 +401,7 @@ func (b *AESGCMBarrier) ReloadMasterKey(ctx context.Context) error { key, err := DeserializeKey(out.Value) memzero(out.Value) if err != nil { - return errwrap.Wrapf("failed to deserialize key: {{err}}", err) + return fmt.Errorf("failed to deserialize key: %w", err) } // Check if the master key is the same @@ -437,7 +436,7 @@ func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { // Read in the keyring out, err := b.backend.Get(ctx, keyringPath) if err != nil { - return errwrap.Wrapf("failed to check for keyring: {{err}}", err) + return fmt.Errorf("failed to check for keyring: %w", err) } if out != nil { // Verify the term is always just one @@ -459,7 +458,7 @@ func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { // Recover the keyring err = b.recoverKeyring(plain) if err != nil { - return errwrap.Wrapf("keyring deserialization failed: {{err}}", err) + return fmt.Errorf("keyring deserialization failed: %w", err) } b.sealed = false @@ -470,7 +469,7 @@ func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { // Read the barrier initialization key out, err = b.backend.Get(ctx, barrierInitPath) if err != nil { - return errwrap.Wrapf("failed to check for initialization: {{err}}", err) + return fmt.Errorf("failed to check for initialization: %w", err) } if out == nil { return ErrBarrierNotInit @@ -511,7 +510,7 @@ func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { Value: init.Key, }) if err != nil { - return errwrap.Wrapf("failed to create keyring: {{err}}", err) + return fmt.Errorf("failed to create keyring: %w", err) } if err := b.persistKeyring(ctx, keyring); err != nil { return err @@ -519,7 +518,7 @@ func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { // Delete the old barrier entry if err := b.backend.Delete(ctx, barrierInitPath); err != nil { - return errwrap.Wrapf("failed to delete barrier init file: {{err}}", err) + return fmt.Errorf("failed to delete barrier init file: %w", err) } // Set the vault as unsealed @@ -555,7 +554,7 @@ func (b *AESGCMBarrier) Rotate(ctx context.Context, randomSource io.Reader) (uin // Generate a new key encrypt, err := b.GenerateKey(randomSource) if err != nil { - return 0, errwrap.Wrapf("failed to generate encryption key: {{err}}", err) + return 0, fmt.Errorf("failed to generate encryption key: %w", err) } // Get the next term @@ -569,7 +568,7 @@ func (b *AESGCMBarrier) 
Rotate(ctx context.Context, randomSource io.Reader) (uin Value: encrypt, }) if err != nil { - return 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err) + return 0, fmt.Errorf("failed to add new encryption key: %w", err) } // Persist the new keyring @@ -691,7 +690,7 @@ func (b *AESGCMBarrier) CheckUpgrade(ctx context.Context) (bool, uint32, error) // Update the keyring newKeyring, err := b.keyring.AddKey(key) if err != nil { - return false, 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err) + return false, 0, fmt.Errorf("failed to add new encryption key: %w", err) } b.keyring = newKeyring @@ -866,7 +865,7 @@ func (b *AESGCMBarrier) lockSwitchedGet(ctx context.Context, key string, getLock // Decrypt the ciphertext plain, err := b.decrypt(key, gcm, pe.Value) if err != nil { - return nil, errwrap.Wrapf("decryption failed: {{err}}", err) + return nil, fmt.Errorf("decryption failed: %w", err) } // Wrap in a logical entry @@ -945,7 +944,7 @@ func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) { // Create the AES cipher aesCipher, err := aes.NewCipher(key) if err != nil { - return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err) + return nil, fmt.Errorf("failed to create cipher: %w", err) } // Create the GCM mode AEAD @@ -1080,7 +1079,7 @@ func (b *AESGCMBarrier) Decrypt(_ context.Context, key string, ciphertext []byte // Decrypt the ciphertext plain, err := b.decrypt(key, gcm, ciphertext) if err != nil { - return nil, errwrap.Wrapf("decryption failed: {{err}}", err) + return nil, fmt.Errorf("decryption failed: %w", err) } return plain, nil diff --git a/vault/cluster.go b/vault/cluster.go index 649dd0d3d2096..c114e09bdd1a8 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -17,7 +17,6 @@ import ( "strings" "time" - "github.com/hashicorp/errwrap" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" @@ -71,7 +70,7 @@ func (c *Core) Cluster(ctx context.Context) (*Cluster, error) { // Decode the cluster information if err = jsonutil.DecodeJSON(entry.Value, &cluster); err != nil { - return nil, errwrap.Wrapf("failed to decode cluster details: {{err}}", err) + return nil, fmt.Errorf("failed to decode cluster details: %w", err) } // Set in config file @@ -136,7 +135,7 @@ func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) { cert, err := x509.ParseCertificate(adv.ClusterCert) if err != nil { c.logger.Error("failed parsing local cluster certificate", "error", err) - return errwrap.Wrapf("error parsing local cluster certificate: {{err}}", err) + return fmt.Errorf("error parsing local cluster certificate: %w", err) } c.localClusterParsedCert.Store(cert) @@ -247,13 +246,13 @@ func (c *Core) setupCluster(ctx context.Context) error { certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey).Public(), c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey)) if err != nil { c.logger.Error("error generating self-signed cert", "error", err) - return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err) + return fmt.Errorf("unable to generate local cluster certificate: %w", err) } parsedCert, err := x509.ParseCertificate(certBytes) if err != nil { c.logger.Error("error parsing self-signed cert", "error", err) - return errwrap.Wrapf("error parsing generated certificate: {{err}}", err) + return fmt.Errorf("error parsing generated certificate: %w", err) } 
c.localClusterCert.Store(certBytes) diff --git a/vault/cluster/cluster.go b/vault/cluster/cluster.go index 3c0e71ad5bba1..b4ec28cf6f4cb 100644 --- a/vault/cluster/cluster.go +++ b/vault/cluster/cluster.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/consts" "golang.org/x/net/http2" @@ -95,7 +94,7 @@ func NewListener(networkLayer NetworkLayer, cipherSuites []uint16, logger log.Lo func (cl *Listener) SetAdvertiseAddr(addr string) error { u, err := url.ParseRequestURI(addr) if err != nil { - return errwrap.Wrapf("failed to parse advertise address: {{err}}", err) + return fmt.Errorf("failed to parse advertise address: %w", err) } cl.advertise = &NetAddr{ Host: u.Host, diff --git a/vault/core.go b/vault/core.go index 464eb7c203031..770f0f8c31acd 100644 --- a/vault/core.go +++ b/vault/core.go @@ -727,7 +727,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { if conf.RedirectAddr != "" { u, err := url.Parse(conf.RedirectAddr) if err != nil { - return nil, errwrap.Wrapf("redirect address is not valid url: {{err}}", err) + return nil, fmt.Errorf("redirect address is not valid url: %w", err) } if u.Scheme == "" { @@ -875,7 +875,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { default: suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites) if err != nil { - return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err) + return nil, fmt.Errorf("error parsing cluster cipher suites: %w", err) } c.clusterCipherSuites = suites } @@ -920,7 +920,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { // Construct a new AES-GCM barrier c.barrier, err = NewAESGCMBarrier(c.physical) if err != nil { - return nil, errwrap.Wrapf("barrier setup failed: {{err}}", err) + return nil, fmt.Errorf("barrier setup failed: %w", err) } // We create the funcs here, then populate the given config with it so that @@ -940,7 +940,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { if conf.PluginDirectory != "" { c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory) if err != nil { - return nil, errwrap.Wrapf("core setup failed, could not verify plugin directory: {{err}}", err) + return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %w", err) } } @@ -1342,7 +1342,7 @@ func (c *Core) getUnsealKey(ctx context.Context, seal Seal) ([]byte, error) { } else { unsealKey, err = shamir.Combine(c.unlockInfo.Parts) if err != nil { - return nil, errwrap.Wrapf("failed to compute combined key: {{err}}", err) + return nil, fmt.Errorf("failed to compute combined key: %w", err) } } @@ -1426,20 +1426,20 @@ func (c *Core) migrateSeal(ctx context.Context) error { // Set the recovery and barrier keys to be the same. 
recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx) if err != nil { - return errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err) + return fmt.Errorf("error getting recovery key to set on new seal: %w", err) } if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil { - return errwrap.Wrapf("error setting new recovery key information during migrate: {{err}}", err) + return fmt.Errorf("error setting new recovery key information during migrate: %w", err) } barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx) if err != nil { - return errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err) + return fmt.Errorf("error getting stored keys to set on new seal: %w", err) } if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil { - return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err) + return fmt.Errorf("error setting new barrier key information during migrate: %w", err) } case c.migrationInfo.seal.RecoveryKeySupported(): @@ -1448,22 +1448,22 @@ func (c *Core) migrateSeal(ctx context.Context) error { recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx) if err != nil { - return errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err) + return fmt.Errorf("error getting recovery key to set on new seal: %w", err) } // We have recovery keys; we're going to use them as the new shamir KeK. err = c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveryKey) if err != nil { - return errwrap.Wrapf("failed to set master key in seal: {{err}}", err) + return fmt.Errorf("failed to set master key in seal: %w", err) } barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx) if err != nil { - return errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err) + return fmt.Errorf("error getting stored keys to set on new seal: %w", err) } if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil { - return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err) + return fmt.Errorf("error setting new barrier key information during migrate: %w", err) } case c.seal.RecoveryKeySupported(): @@ -1471,24 +1471,24 @@ func (c *Core) migrateSeal(ctx context.Context) error { // Migration is happening from shamir -> auto. In this case use the shamir // combined key that was used to store the master key as the new recovery key. if err := c.seal.SetRecoveryKey(ctx, c.migrationInfo.unsealKey); err != nil { - return errwrap.Wrapf("error setting new recovery key information: {{err}}", err) + return fmt.Errorf("error setting new recovery key information: %w", err) } // Generate a new master key newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader) if err != nil { - return errwrap.Wrapf("error generating new master key: {{err}}", err) + return fmt.Errorf("error generating new master key: %w", err) } // Rekey the barrier. This handles the case where the shamir seal we're // migrating from was a legacy seal without a stored master key. 
if err := c.barrier.Rekey(ctx, newMasterKey); err != nil { - return errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err) + return fmt.Errorf("error rekeying barrier during migration: %w", err) } // Store the new master key if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil { - return errwrap.Wrapf("error storing new master key: {{err}}", err) + return fmt.Errorf("error storing new master key: %w", err) } default: @@ -1497,7 +1497,7 @@ func (c *Core) migrateSeal(ctx context.Context) error { err = c.migrateSealConfig(ctx) if err != nil { - return errwrap.Wrapf("error storing new seal configs: {{err}}", err) + return fmt.Errorf("error storing new seal configs: %w", err) } // Flag migration performed for seal-rewrap later @@ -2146,28 +2146,28 @@ func (c *Core) preSeal() error { c.clusterParamsLock.Lock() if err := stopReplication(c); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error stopping replication: %w", err)) } c.clusterParamsLock.Unlock() if err := c.teardownAudits(); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error tearing down audits: %w", err)) } if err := c.stopExpiration(); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error stopping expiration: %w", err)) } c.stopActivityLog() if err := c.teardownCredentials(context.Background()); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error tearing down credentials: %w", err)) } if err := c.teardownPolicyStore(); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error tearing down policy store: %w", err)) } if err := c.stopRollback(); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error stopping rollback: %w", err)) } if err := c.unloadMounts(context.Background()); err != nil { - result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err)) + result = multierror.Append(result, fmt.Errorf("error unloading mounts: %w", err)) } if err := enterprisePreSeal(c); err != nil { result = multierror.Append(result, err) @@ -2266,7 +2266,7 @@ func lastRemoteUpstreamWALImpl(c *Core) uint64 { func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) { pe, err := c.physical.Get(ctx, barrierSealConfigPath) if err != nil { - return nil, nil, errwrap.Wrapf("failed to fetch barrier seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to fetch barrier seal configuration at migration check time: %w", err) } if pe == nil { return nil, nil, nil @@ -2275,11 +2275,11 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi barrierConf := new(SealConfig) if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil { - return nil, nil, errwrap.Wrapf("failed to decode barrier seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to decode barrier seal configuration at migration check time: 
%w", err) } err = barrierConf.Validate() if err != nil { - return nil, nil, errwrap.Wrapf("failed to validate barrier seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to validate barrier seal configuration at migration check time: %w", err) } // In older versions of vault the default seal would not store a type. This // is here to offer backwards compatibility for older seal configs. @@ -2290,16 +2290,16 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi var recoveryConf *SealConfig pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath) if err != nil { - return nil, nil, errwrap.Wrapf("failed to fetch seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to fetch seal configuration at migration check time: %w", err) } if pe != nil { recoveryConf = &SealConfig{} if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil { - return nil, nil, errwrap.Wrapf("failed to decode seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to decode seal configuration at migration check time: %w", err) } err = recoveryConf.Validate() if err != nil { - return nil, nil, errwrap.Wrapf("failed to validate seal configuration at migration check time: {{err}}", err) + return nil, nil, fmt.Errorf("failed to validate seal configuration at migration check time: %w", err) } // In older versions of vault the default seal would not store a type. This // is here to offer backwards compatibility for older seal configs. @@ -2388,7 +2388,7 @@ func (c *Core) adjustForSealMigration(unwrapSeal Seal) error { if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { entry, err := c.physical.Get(ctx, recoverySealConfigPath) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to read %q recovery seal configuration: {{err}}", existBarrierSealConfig.Type), err) + return fmt.Errorf("failed to read %q recovery seal configuration: %w", existBarrierSealConfig.Type, err) } if entry == nil { return errors.New("Recovery seal configuration not found for existing seal") @@ -2444,15 +2444,15 @@ func (c *Core) migrateSealConfig(ctx context.Context) error { } if err := c.seal.SetBarrierConfig(ctx, bc); err != nil { - return errwrap.Wrapf("error storing barrier config after migration: {{err}}", err) + return fmt.Errorf("error storing barrier config after migration: %w", err) } if c.seal.RecoveryKeySupported() { if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil { - return errwrap.Wrapf("error storing recovery config after migration: {{err}}", err) + return fmt.Errorf("error storing recovery config after migration: %w", err) } } else if err := c.physical.Delete(ctx, recoverySealConfigPlaintextPath); err != nil { - return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err) + return fmt.Errorf("failed to delete old recovery seal configuration during migration: %w", err) } return nil @@ -2508,7 +2508,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey switch seal.StoredKeysSupported() { case vaultseal.StoredKeysSupportedGeneric: if err := seal.VerifyRecoveryKey(ctx, combinedKey); err != nil { - return nil, errwrap.Wrapf("recovery key verification failed: {{err}}", err) + return nil, fmt.Errorf("recovery key verification failed: %w", err) } storedKeys, err := seal.GetStoredKeys(ctx) @@ -2520,7 +2520,7 @@ func (c *Core) unsealKeyToMasterKey(ctx 
context.Context, seal Seal, combinedKey err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys)) } if err != nil { - return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err) + return nil, fmt.Errorf("unable to retrieve stored keys: %w", err) } return storedKeys[0], nil @@ -2534,7 +2534,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey testseal.SetCore(c) cfg, err := seal.BarrierConfig(ctx) if err != nil { - return nil, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err) + return nil, fmt.Errorf("failed to setup test barrier config: %w", err) } testseal.SetCachedBarrierConfig(cfg) seal = testseal @@ -2542,7 +2542,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey err := seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(combinedKey) if err != nil { - return nil, errwrap.Wrapf("failed to setup unseal key: {{err}}", err) + return nil, fmt.Errorf("failed to setup unseal key: %w", err) } storedKeys, err := seal.GetStoredKeys(ctx) if storedKeys == nil && err == nil && allowMissing { @@ -2552,7 +2552,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys)) } if err != nil { - return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err) + return nil, fmt.Errorf("unable to retrieve stored keys: %w", err) } return storedKeys[0], nil diff --git a/vault/cors.go b/vault/cors.go index 8a9533b0c81e0..0ee0df2292f88 100644 --- a/vault/cors.go +++ b/vault/cors.go @@ -3,10 +3,10 @@ package vault import ( "context" "errors" + "fmt" "sync" "sync/atomic" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" @@ -53,11 +53,11 @@ func (c *Core) saveCORSConfig(ctx context.Context) error { entry, err := logical.StorageEntryJSON("cors", localConfig) if err != nil { - return errwrap.Wrapf("failed to create CORS config entry: {{err}}", err) + return fmt.Errorf("failed to create CORS config entry: %w", err) } if err := view.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to save CORS config: {{err}}", err) + return fmt.Errorf("failed to save CORS config: %w", err) } return nil @@ -70,7 +70,7 @@ func (c *Core) loadCORSConfig(ctx context.Context) error { // Load the config in out, err := view.Get(ctx, "cors") if err != nil { - return errwrap.Wrapf("failed to read CORS config: {{err}}", err) + return fmt.Errorf("failed to read CORS config: %w", err) } if out == nil { return nil diff --git a/vault/counters.go b/vault/counters.go index c0a914a8ebb50..e900922547feb 100644 --- a/vault/counters.go +++ b/vault/counters.go @@ -2,11 +2,11 @@ package vault import ( "context" + "fmt" "sort" "sync/atomic" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/logical" ) @@ -58,7 +58,7 @@ func (c *Core) loadAllRequestCounters(ctx context.Context, now time.Time) ([]Dat datepaths, err := view.List(ctx, "") if err != nil { - return nil, errwrap.Wrapf("failed to read request counters: {{err}}", err) + return nil, fmt.Errorf("failed to read request counters: %w", err) } var all []DatedRequestCounter @@ -66,7 +66,7 @@ func (c *Core) loadAllRequestCounters(ctx context.Context, now time.Time) ([]Dat for _, datepath := range datepaths { datesubpaths, err := view.List(ctx, datepath) if err != nil { - return nil, errwrap.Wrapf("failed to read 
request counters: {{err}}", err) + return nil, fmt.Errorf("failed to read request counters: %w", err) } sort.Strings(datesubpaths) for _, datesubpath := range datesubpaths { @@ -123,7 +123,7 @@ func (c *Core) loadRequestCounters(ctx context.Context, datepath string) (*Reque out, err := view.Get(ctx, datepath) if err != nil { - return nil, errwrap.Wrapf("failed to read request counters: {{err}}", err) + return nil, fmt.Errorf("failed to read request counters: %w", err) } if out == nil { return nil, nil @@ -160,11 +160,11 @@ func (c *Core) saveCurrentRequestCounters(ctx context.Context, now time.Time) er } entry, err := logical.StorageEntryJSON(writeDatePath, localCounters) if err != nil { - return errwrap.Wrapf("failed to create request counters entry: {{err}}", err) + return fmt.Errorf("failed to create request counters entry: %w", err) } if err := view.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to save request counters: {{err}}", err) + return fmt.Errorf("failed to save request counters: %w", err) } if shouldReset { diff --git a/vault/dynamic_system_view.go b/vault/dynamic_system_view.go index 41b132bd1c2e0..86e8b560ba737 100644 --- a/vault/dynamic_system_view.go +++ b/vault/dynamic_system_view.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/random" @@ -219,7 +218,7 @@ func (d dynamicSystemView) LookupPlugin(ctx context.Context, name string, plugin return nil, err } if r == nil { - return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound) + return nil, fmt.Errorf("%w: %s", ErrPluginNotFound, name) } return r, nil diff --git a/vault/expiration.go b/vault/expiration.go index 33fe34f40bed7..4c3f28f5f819a 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -583,12 +583,12 @@ func (m *ExpirationManager) Tidy(ctx context.Context) error { le, err := m.loadEntry(ctx, leaseID) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to load the lease ID %q: {{err}}", leaseID), err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to load the lease ID %q: %w", leaseID, err)) return } if le == nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("nil entry for lease ID %q: {{err}}", leaseID), err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("nil entry for lease ID %q: %w", leaseID, err)) return } @@ -609,7 +609,7 @@ func (m *ExpirationManager) Tidy(ctx context.Context) error { lock.RUnlock() if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup token: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup token: %w", err)) return } @@ -639,7 +639,7 @@ func (m *ExpirationManager) Tidy(ctx context.Context) error { // again err = m.revokeCommon(ctx, leaseID, true, true) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke an invalid lease with ID %q: {{err}}", leaseID), err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke an invalid lease with ID %q: %w", leaseID, err)) return } revokedCount++ @@ -1032,7 +1032,7 @@ func (m *ExpirationManager) RevokeByToken(ctx context.Context, te *logical.Token // Lookup the leases existing, err := m.lookupLeasesByToken(tokenCtx, te) if err != nil { - return errwrap.Wrapf("failed to scan for leases: {{err}}", err) + return 
fmt.Errorf("failed to scan for leases: %w", err) } // Revoke all the keys @@ -1101,7 +1101,7 @@ func (m *ExpirationManager) revokePrefixCommon(ctx context.Context, prefix strin if err == nil && le != nil { if sync { if err := m.revokeCommon(ctx, prefix, force, false); err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q: {{err}}", prefix), err) + return fmt.Errorf("failed to revoke %q: %w", prefix, err) } return nil } @@ -1119,7 +1119,7 @@ func (m *ExpirationManager) revokePrefixCommon(ctx context.Context, prefix strin sub := view.SubView(prefix) existing, err := logical.CollectKeys(ctx, sub) if err != nil { - return errwrap.Wrapf("failed to scan for leases: {{err}}", err) + return fmt.Errorf("failed to scan for leases: %w", err) } // Revoke all the keys @@ -1128,11 +1128,11 @@ func (m *ExpirationManager) revokePrefixCommon(ctx context.Context, prefix strin switch { case sync: if err := m.revokeCommon(ctx, leaseID, force, false); err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err) + return fmt.Errorf("failed to revoke %q (%d / %d): %w", leaseID, idx+1, len(existing), err) } default: if err := m.LazyRevoke(ctx, leaseID); err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err) + return fmt.Errorf("failed to revoke %q (%d / %d): %w", leaseID, idx+1, len(existing), err) } } } @@ -1443,17 +1443,17 @@ func (m *ExpirationManager) Register(ctx context.Context, req *logical.Request, revokeCtx := namespace.ContextWithNamespace(m.quitContext, ns) revResp, err := m.router.Route(revokeCtx, logical.RevokeRequest(req.Path, resp.Secret, resp.Data)) if err != nil { - retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("an additional internal error was encountered revoking the newly-generated secret: %w", err)) } else if revResp != nil && revResp.IsError() { - retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error())) + retErr = multierror.Append(retErr, fmt.Errorf("an additional error was encountered revoking the newly-generated secret: %w", revResp.Error())) } if err := m.deleteEntry(ctx, le); err != nil { - retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("an additional error was encountered deleting any lease associated with the newly-generated secret: %w", err)) } if err := m.removeIndexByToken(ctx, le, indexToken); err != nil { - retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("an additional error was encountered removing lease indexes associated with the newly-generated secret: %w", err)) } } }() @@ -1791,7 +1791,7 @@ func (m *ExpirationManager) revokeEntry(ctx context.Context, le *leaseEntry) err } if err := m.tokenStore.revokeTree(ctx, le); err != nil { - return errwrap.Wrapf("failed to revoke token: {{err}}", err) + return fmt.Errorf("failed to revoke token: %w", err) } return nil @@ -1828,7 +1828,7 @@ func (m *ExpirationManager) renewEntry(ctx context.Context, le *leaseEntry, 
incr req := logical.RenewRequest(le.Path, &secret, le.Data) resp, err := m.router.Route(nsCtx, req) if err != nil || (resp != nil && resp.IsError()) { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to renew entry: resp: %#v err: {{err}}", resp), err) + return nil, fmt.Errorf("failed to renew entry: resp: %#v err: %w", resp, err) } return resp, nil } @@ -1856,7 +1856,7 @@ func (m *ExpirationManager) renewAuthEntry(ctx context.Context, req *logical.Req authReq.Connection = req.Connection resp, err := m.router.Route(nsCtx, authReq) if err != nil { - return nil, errwrap.Wrapf("failed to renew entry: {{err}}", err) + return nil, fmt.Errorf("failed to renew entry: %w", err) } return resp, nil } @@ -1902,14 +1902,14 @@ func (m *ExpirationManager) loadEntryInternal(ctx context.Context, leaseID strin view := m.leaseView(ns) out, err := view.Get(ctx, leaseID) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read lease entry %s: {{err}}", leaseID), err) + return nil, fmt.Errorf("failed to read lease entry %s: %w", leaseID, err) } if out == nil { return nil, nil } le, err := decodeLeaseEntry(out.Value) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode lease entry %s: {{err}}", leaseID), err) + return nil, fmt.Errorf("failed to decode lease entry %s: %w", leaseID, err) } le.namespace = ns @@ -1943,7 +1943,7 @@ func (m *ExpirationManager) persistEntry(ctx context.Context, le *leaseEntry) er // Encode the entry buf, err := le.encode() if err != nil { - return errwrap.Wrapf("failed to encode lease entry: {{err}}", err) + return fmt.Errorf("failed to encode lease entry: %w", err) } // Write out to the view @@ -1957,7 +1957,7 @@ func (m *ExpirationManager) persistEntry(ctx context.Context, le *leaseEntry) er view := m.leaseView(le.namespace) if err := view.Put(ctx, &ent); err != nil { - return errwrap.Wrapf("failed to persist lease entry: {{err}}", err) + return fmt.Errorf("failed to persist lease entry: %w", err) } return nil } @@ -1966,7 +1966,7 @@ func (m *ExpirationManager) persistEntry(ctx context.Context, le *leaseEntry) er func (m *ExpirationManager) deleteEntry(ctx context.Context, le *leaseEntry) error { view := m.leaseView(le.namespace) if err := view.Delete(ctx, le.LeaseID); err != nil { - return errwrap.Wrapf("failed to delete lease entry: {{err}}", err) + return fmt.Errorf("failed to delete lease entry: %w", err) } return nil } @@ -2003,7 +2003,7 @@ func (m *ExpirationManager) createIndexByToken(ctx context.Context, le *leaseEnt } tokenView := m.tokenIndexView(tokenNS) if err := tokenView.Put(ctx, &ent); err != nil { - return errwrap.Wrapf("failed to persist lease index entry: {{err}}", err) + return fmt.Errorf("failed to persist lease index entry: %w", err) } return nil } @@ -2081,7 +2081,7 @@ func (m *ExpirationManager) removeIndexByToken(ctx context.Context, le *leaseEnt key := saltedID + "/" + leaseSaltedID tokenView := m.tokenIndexView(tokenNS) if err := tokenView.Delete(ctx, key); err != nil { - return errwrap.Wrapf("failed to delete lease index entry: {{err}}", err) + return fmt.Errorf("failed to delete lease index entry: %w", err) } return nil } @@ -2173,7 +2173,7 @@ func (m *ExpirationManager) lookupLeasesByToken(ctx context.Context, te *logical prefix := saltedID + "/" subKeys, err := tokenView.List(ctx, prefix) if err != nil { - return nil, errwrap.Wrapf("failed to list leases: {{err}}", err) + return nil, fmt.Errorf("failed to list leases: %w", err) } // Read each index entry @@ -2181,7 +2181,7 @@ func (m *ExpirationManager) 
lookupLeasesByToken(ctx context.Context, te *logical for _, sub := range subKeys { out, err := tokenView.Get(ctx, prefix+sub) if err != nil { - return nil, errwrap.Wrapf("failed to read lease index: {{err}}", err) + return nil, fmt.Errorf("failed to read lease index: %w", err) } if out == nil { continue @@ -2198,13 +2198,13 @@ func (m *ExpirationManager) lookupLeasesByToken(ctx context.Context, te *logical prefix := saltedID + "/" subKeys, err := tokenView.List(ctx, prefix) if err != nil { - return nil, errwrap.Wrapf("failed to list leases on root namespace: {{err}}", err) + return nil, fmt.Errorf("failed to list leases on root namespace: %w", err) } for _, sub := range subKeys { out, err := tokenView.Get(ctx, prefix+sub) if err != nil { - return nil, errwrap.Wrapf("failed to read lease index on root namespace: {{err}}", err) + return nil, fmt.Errorf("failed to read lease index on root namespace: %w", err) } if out == nil { continue diff --git a/vault/expiration_util.go b/vault/expiration_util.go index c1bdaae187cc2..eac1703bc6190 100644 --- a/vault/expiration_util.go +++ b/vault/expiration_util.go @@ -3,7 +3,8 @@ package vault import ( - "github.com/hashicorp/errwrap" + "fmt" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/logical" ) @@ -21,7 +22,7 @@ func (m *ExpirationManager) collectLeases() (map[*namespace.Namespace][]string, existing := make(map[*namespace.Namespace][]string) keys, err := logical.CollectKeys(m.quitContext, m.leaseView(namespace.RootNamespace)) if err != nil { - return nil, 0, errwrap.Wrapf("failed to scan for leases: {{err}}", err) + return nil, 0, fmt.Errorf("failed to scan for leases: %w", err) } existing[namespace.RootNamespace] = keys leaseCount += len(keys) diff --git a/vault/generate_root.go b/vault/generate_root.go index 745f57d5a1f97..b701e5bfe51d3 100644 --- a/vault/generate_root.go +++ b/vault/generate_root.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/helper/xor" @@ -41,10 +40,10 @@ type generateStandardRootToken struct{} func (g generateStandardRootToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error { masterKey, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey) if err != nil { - return errwrap.Wrapf("unable to authenticate: {{err}}", err) + return fmt.Errorf("unable to authenticate: %w", err) } if err := c.barrier.VerifyMaster(masterKey); err != nil { - return errwrap.Wrapf("master key verification failed: {{err}}", err) + return fmt.Errorf("master key verification failed: %w", err) } return nil @@ -142,7 +141,7 @@ func (c *Core) GenerateRootInit(otp, pgpKey string, strategy GenerateRootStrateg case len(pgpKey) > 0: fingerprints, err := pgpkeys.GetFingerprints([]string{pgpKey}, nil) if err != nil { - return errwrap.Wrapf("error parsing PGP key: {{err}}", err) + return fmt.Errorf("error parsing PGP key: %w", err) } if len(fingerprints) != 1 || fingerprints[0] == "" { return fmt.Errorf("could not acquire PGP key entity") @@ -304,13 +303,13 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, combinedKey, err = shamir.Combine(c.generateRootProgress) c.generateRootProgress = nil if err != nil { - return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err) + return nil, fmt.Errorf("failed to compute master key: %w", err) } } if err := strategy.authenticate(ctx, c, combinedKey); err != nil { c.logger.Error("root generation 
aborted", "error", err.Error()) - return nil, errwrap.Wrapf("root generation aborted: {{err}}", err) + return nil, fmt.Errorf("root generation aborted: %w", err) } // Run the generate strategy diff --git a/vault/generate_root_recovery.go b/vault/generate_root_recovery.go index 4ad839ea22dc6..f016af013eefd 100644 --- a/vault/generate_root_recovery.go +++ b/vault/generate_root_recovery.go @@ -2,8 +2,8 @@ package vault import ( "context" + "fmt" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/base62" "go.uber.org/atomic" ) @@ -23,17 +23,17 @@ type generateRecoveryToken struct { func (g *generateRecoveryToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error { key, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey) if err != nil { - return errwrap.Wrapf("unable to authenticate: {{err}}", err) + return fmt.Errorf("unable to authenticate: %w", err) } // Use the retrieved master key to unseal the barrier if err := c.barrier.Unseal(ctx, key); err != nil { - return errwrap.Wrapf("recovery operation token generation failed, cannot unseal barrier: {{err}}", err) + return fmt.Errorf("recovery operation token generation failed, cannot unseal barrier: %w", err) } for _, v := range c.postRecoveryUnsealFuncs { if err := v(); err != nil { - return errwrap.Wrapf("failed to run post unseal func: {{err}}", err) + return fmt.Errorf("failed to run post unseal func: %w", err) } } return nil diff --git a/vault/ha.go b/vault/ha.go index d81d18ab7281d..158a6e6041433 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/vault/sdk/physical" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-uuid" @@ -828,7 +827,7 @@ func (c *Core) checkKeyUpgrades(ctx context.Context) error { func (c *Core) reloadMasterKey(ctx context.Context) error { if err := c.barrier.ReloadMasterKey(ctx); err != nil { - return errwrap.Wrapf("error reloading master key: {{err}}", err) + return fmt.Errorf("error reloading master key: %w", err) } return nil } @@ -854,7 +853,7 @@ func (c *Core) reloadShamirKey(ctx context.Context) error { case seal.StoredKeysNotSupported: keyring, err := c.barrier.Keyring() if err != nil { - return errwrap.Wrapf("failed to update seal access: {{err}}", err) + return fmt.Errorf("failed to update seal access: %w", err) } shamirKey = keyring.masterKey } @@ -863,23 +862,23 @@ func (c *Core) reloadShamirKey(ctx context.Context) error { func (c *Core) performKeyUpgrades(ctx context.Context) error { if err := c.checkKeyUpgrades(ctx); err != nil { - return errwrap.Wrapf("error checking for key upgrades: {{err}}", err) + return fmt.Errorf("error checking for key upgrades: %w", err) } if err := c.reloadMasterKey(ctx); err != nil { - return errwrap.Wrapf("error reloading master key: {{err}}", err) + return fmt.Errorf("error reloading master key: %w", err) } if err := c.barrier.ReloadKeyring(ctx); err != nil { - return errwrap.Wrapf("error reloading keyring: {{err}}", err) + return fmt.Errorf("error reloading keyring: %w", err) } if err := c.reloadShamirKey(ctx); err != nil { - return errwrap.Wrapf("error reloading shamir kek key: {{err}}", err) + return fmt.Errorf("error reloading shamir kek key: %w", err) } if err := c.scheduleUpgradeCleanup(ctx); err != nil { - return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err) + return fmt.Errorf("error scheduling upgrade cleanup: %w", err) } 
return nil @@ -891,7 +890,7 @@ func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error { // List the upgrades upgrades, err := c.barrier.List(ctx, keyringUpgradePrefix) if err != nil { - return errwrap.Wrapf("failed to list upgrades: {{err}}", err) + return fmt.Errorf("failed to list upgrades: %w", err) } // Nothing to do if no upgrades diff --git a/vault/identity_store.go b/vault/identity_store.go index b7e5422ac7868..176ba2a00786c 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -8,7 +8,6 @@ import ( metrics "github.com/armon/go-metrics" "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" @@ -67,12 +66,12 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo core.AddLogger(groupsPackerLogger) iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "") if err != nil { - return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) + return nil, fmt.Errorf("failed to create entity packer: %w", err) } iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, groupsPackerLogger, groupBucketsPrefix) if err != nil { - return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err) + return nil, fmt.Errorf("failed to create group packer: %w", err) } iStore.Backend = &framework.Backend{ @@ -348,7 +347,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto var oldEntity identity.EntityStorageEntry oldEntityErr := ptypes.UnmarshalAny(item.Message, &oldEntity) if oldEntityErr != nil { - return nil, errwrap.Wrapf("failed to decode entity from storage bucket item: {{err}}", err) + return nil, fmt.Errorf("failed to decode entity from storage bucket item: %w", err) } i.logger.Debug("upgrading the entity using patch introduced with vault 0.8.2.1", "entity_id", oldEntity.ID) @@ -425,7 +424,7 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide var group identity.Group err := ptypes.UnmarshalAny(item.Message, &group) if err != nil { - return nil, errwrap.Wrapf("failed to decode group from storage bucket item: {{err}}", err) + return nil, fmt.Errorf("failed to decode group from storage bucket item: %w", err) } if group.NamespaceID == "" { diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index 70dd3c507ce68..95949c8559284 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/errwrap" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/identity/mfa" @@ -644,7 +643,7 @@ func (i *IdentityStore) handlePathEntityListCommon(ctx context.Context, req *log iter, err := txn.Get(entitiesTable, "namespace_id", ns.ID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch iterator for entities in memdb: {{err}}", err) + return nil, fmt.Errorf("failed to fetch iterator for entities in memdb: %w", err) } ws.Add(iter.WatchCh()) @@ -793,7 +792,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit err = i.MemDBUpsertAliasInTxn(txn, alias, false) if err != nil { - return nil, errwrap.Wrapf("failed to update alias during merge: {{err}}", err) + return nil, fmt.Errorf("failed to update alias during merge: %w", err) } // Add the alias to the desired entity 
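Nearly every hunk in this patch applies the same mechanical rewrite: errwrap.Wrapf("<message>: {{err}}", err) becomes fmt.Errorf("<message>: %w", err). The short sketch below is not taken from the Vault codebase (openConfig and the path are hypothetical, for illustration only); it shows why the standard-library %w verb is the replacement: the wrapped error stays on the error chain, so callers can still inspect it with errors.Is or errors.As without importing errwrap.

package main

import (
	"errors"
	"fmt"
	"os"
)

// openConfig is a hypothetical helper that wraps a lower-level error,
// mirroring the wrapping pattern introduced throughout this patch.
func openConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Before: return errwrap.Wrapf("failed to open config: {{err}}", err)
		// After:  %w wraps err, keeping it available to errors.Is / errors.As.
		return fmt.Errorf("failed to open config: %w", err)
	}
	return nil
}

func main() {
	err := openConfig("/nonexistent/config.hcl")
	// The original os error is still detectable through the %w chain.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // prints: true
}

Callers that previously relied on errwrap's helpers to match wrapped errors can move to the standard errors.Is / errors.As once every wrap site uses %w, which is what the remaining hunks below continue to do.
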
diff --git a/vault/identity_store_groups.go b/vault/identity_store_groups.go index 5f8c88f6bfebf..0be981d9a2603 100644 --- a/vault/identity_store_groups.go +++ b/vault/identity_store_groups.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" @@ -478,7 +477,7 @@ func (i *IdentityStore) handleGroupListCommon(ctx context.Context, byID bool) (* iter, err := txn.Get(groupsTable, "namespace_id", ns.ID) if err != nil { - return nil, errwrap.Wrapf("failed to lookup groups using namespace ID: {{err}}", err) + return nil, fmt.Errorf("failed to lookup groups using namespace ID: %w", err) } var keys []string diff --git a/vault/identity_store_oidc.go b/vault/identity_store_oidc.go index 7d9e098f0668f..dec2695bc3e5c 100644 --- a/vault/identity_store_oidc.go +++ b/vault/identity_store_oidc.go @@ -14,7 +14,6 @@ import ( "strings" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/identity" @@ -780,7 +779,7 @@ func (i *IdentityStore) pathOIDCGenerateToken(ctx context.Context, req *logical. signedIdToken, err := key.signPayload(payload) if err != nil { - return nil, errwrap.Wrapf("error signing OIDC token: {{err}}", err) + return nil, fmt.Errorf("error signing OIDC token: %w", err) } return &logical.Response{ diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index a868fd37469a2..f6535b615f53a 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -82,7 +82,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { i.logger.Debug("identity loading groups") existing, err := i.groupPacker.View().List(ctx, groupBucketsPrefix) if err != nil { - return errwrap.Wrapf("failed to scan for groups: {{err}}", err) + return fmt.Errorf("failed to scan for groups: %w", err) } i.logger.Debug("groups collected", "num_existing", len(existing)) @@ -162,7 +162,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { err = i.UpsertGroupInTxn(ctx, txn, group, persist) if err != nil { txn.Abort() - return errwrap.Wrapf("failed to update group in memdb: {{err}}", err) + return fmt.Errorf("failed to update group in memdb: %w", err) } txn.Commit() @@ -181,7 +181,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { i.logger.Debug("loading entities") existing, err := i.entityPacker.View().List(ctx, storagepacker.StoragePackerBucketsPrefix) if err != nil { - return errwrap.Wrapf("failed to scan for entities: {{err}}", err) + return fmt.Errorf("failed to scan for entities: %w", err) } i.logger.Debug("entities collected", "num_existing", len(existing)) @@ -307,7 +307,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { // Only update MemDB and don't hit the storage again err = i.upsertEntity(nsCtx, entity, nil, false) if err != nil { - return errwrap.Wrapf("failed to update entity in MemDB: {{err}}", err) + return fmt.Errorf("failed to update entity in MemDB: %w", err) } } } @@ -522,18 +522,18 @@ func (i *IdentityStore) MemDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Al aliasRaw, err := txn.First(tableName, "id", alias.ID) if err != nil { - return errwrap.Wrapf("failed to lookup alias from memdb using alias ID: {{err}}", err) + return fmt.Errorf("failed to lookup alias from memdb using alias ID: %w", err) } if aliasRaw != nil { err = txn.Delete(tableName, 
aliasRaw) if err != nil { - return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err) + return fmt.Errorf("failed to delete alias from memdb: %w", err) } } if err := txn.Insert(tableName, alias); err != nil { - return errwrap.Wrapf("failed to update alias into memdb: {{err}}", err) + return fmt.Errorf("failed to update alias into memdb: %w", err) } return nil @@ -555,7 +555,7 @@ func (i *IdentityStore) MemDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clon aliasRaw, err := txn.First(tableName, "id", aliasID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch alias from memdb using alias ID: {{err}}", err) + return nil, fmt.Errorf("failed to fetch alias from memdb using alias ID: %w", err) } if aliasRaw == nil { @@ -618,7 +618,7 @@ func (i *IdentityStore) MemDBAliasByFactorsInTxn(txn *memdb.Txn, mountAccessor, aliasRaw, err := txn.First(tableName, "factors", mountAccessor, aliasName) if err != nil { - return nil, errwrap.Wrapf("failed to fetch alias from memdb using factors: {{err}}", err) + return nil, fmt.Errorf("failed to fetch alias from memdb using factors: %w", err) } if aliasRaw == nil { @@ -662,7 +662,7 @@ func (i *IdentityStore) MemDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string err = txn.Delete(tableName, alias) if err != nil { - return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err) + return fmt.Errorf("failed to delete alias from memdb: %w", err) } return nil @@ -701,18 +701,18 @@ func (i *IdentityStore) MemDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity. entityRaw, err := txn.First(entitiesTable, "id", entity.ID) if err != nil { - return errwrap.Wrapf("failed to lookup entity from memdb using entity id: {{err}}", err) + return fmt.Errorf("failed to lookup entity from memdb using entity id: %w", err) } if entityRaw != nil { err = txn.Delete(entitiesTable, entityRaw) if err != nil { - return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err) + return fmt.Errorf("failed to delete entity from memdb: %w", err) } } if err := txn.Insert(entitiesTable, entity); err != nil { - return errwrap.Wrapf("failed to update entity into memdb: {{err}}", err) + return fmt.Errorf("failed to update entity into memdb: %w", err) } return nil @@ -729,7 +729,7 @@ func (i *IdentityStore) MemDBEntityByIDInTxn(txn *memdb.Txn, entityID string, cl entityRaw, err := txn.First(entitiesTable, "id", entityID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch entity from memdb using entity id: {{err}}", err) + return nil, fmt.Errorf("failed to fetch entity from memdb using entity id: %w", err) } if entityRaw == nil { @@ -780,7 +780,7 @@ func (i *IdentityStore) MemDBEntityByNameInTxn(ctx context.Context, txn *memdb.T entityRaw, err := txn.First(entitiesTable, "name", ns.ID, entityName) if err != nil { - return nil, errwrap.Wrapf("failed to fetch entity from memdb using entity name: {{err}}", err) + return nil, fmt.Errorf("failed to fetch entity from memdb using entity name: %w", err) } if entityRaw == nil { @@ -810,7 +810,7 @@ func (i *IdentityStore) MemDBEntitiesByBucketKeyInTxn(txn *memdb.Txn, bucketKey entitiesIter, err := txn.Get(entitiesTable, "bucket_key", bucketKey) if err != nil { - return nil, errwrap.Wrapf("failed to lookup entities using bucket entry key hash: {{err}}", err) + return nil, fmt.Errorf("failed to lookup entities using bucket entry key hash: %w", err) } var entities []*identity.Entity @@ -830,7 +830,7 @@ func (i *IdentityStore) MemDBEntityByMergedEntityID(mergedEntityID string, clone entityRaw, err := 
txn.First(entitiesTable, "merged_entity_ids", mergedEntityID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch entity from memdb using merged entity id: {{err}}", err) + return nil, fmt.Errorf("failed to fetch entity from memdb using merged entity id: %w", err) } if entityRaw == nil { @@ -918,7 +918,7 @@ func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID stri err = txn.Delete(entitiesTable, entity) if err != nil { - return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err) + return fmt.Errorf("failed to delete entity from memdb: %w", err) } return nil @@ -944,7 +944,7 @@ func (i *IdentityStore) sanitizeAlias(ctx context.Context, alias *identity.Alias // Alias metadata should always be map[string]string err = validateMetadata(alias.Metadata) if err != nil { - return errwrap.Wrapf("invalid alias metadata: {{err}}", err) + return fmt.Errorf("invalid alias metadata: %w", err) } // Create an ID if there isn't one already @@ -1022,7 +1022,7 @@ func (i *IdentityStore) sanitizeEntity(ctx context.Context, entity *identity.Ent // Entity metadata should always be map[string]string err = validateMetadata(entity.Metadata) if err != nil { - return errwrap.Wrapf("invalid entity metadata: {{err}}", err) + return fmt.Errorf("invalid entity metadata: %w", err) } // Set the creation and last update times @@ -1086,7 +1086,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident // Entity metadata should always be map[string]string err = validateMetadata(group.Metadata) if err != nil { - return errwrap.Wrapf("invalid group metadata: {{err}}", err) + return fmt.Errorf("invalid group metadata: %w", err) } // Set the creation and last update times @@ -1102,7 +1102,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident for _, entityID := range group.MemberEntityIDs { entity, err := i.MemDBEntityByID(entityID, false) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to validate entity ID %q: {{err}}", entityID), err) + return fmt.Errorf("failed to validate entity ID %q: %w", entityID, err) } if entity == nil { return fmt.Errorf("invalid entity ID %q", entityID) @@ -1294,7 +1294,7 @@ func validateMetadata(meta map[string]string) error { for key, value := range meta { if err := validateMetaPair(key, value); err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to load metadata pair (%q, %q): {{err}}", key, value), err) + return fmt.Errorf("failed to load metadata pair (%q, %q): %w", key, value, err) } } @@ -1337,7 +1337,7 @@ func (i *IdentityStore) MemDBGroupByNameInTxn(ctx context.Context, txn *memdb.Tx groupRaw, err := txn.First(groupsTable, "name", ns.ID, groupName) if err != nil { - return nil, errwrap.Wrapf("failed to fetch group from memdb using group name: {{err}}", err) + return nil, fmt.Errorf("failed to fetch group from memdb using group name: %w", err) } if groupRaw == nil { @@ -1464,18 +1464,18 @@ func (i *IdentityStore) MemDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Gr groupRaw, err := txn.First(groupsTable, "id", group.ID) if err != nil { - return errwrap.Wrapf("failed to lookup group from memdb using group id: {{err}}", err) + return fmt.Errorf("failed to lookup group from memdb using group id: %w", err) } if groupRaw != nil { err = txn.Delete(groupsTable, groupRaw) if err != nil { - return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err) + return fmt.Errorf("failed to delete group from memdb: %w", err) } } if err := txn.Insert(groupsTable, group); err != nil 
{ - return errwrap.Wrapf("failed to update group into memdb: {{err}}", err) + return fmt.Errorf("failed to update group into memdb: %w", err) } return nil @@ -1501,7 +1501,7 @@ func (i *IdentityStore) MemDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string err = txn.Delete("groups", group) if err != nil { - return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err) + return fmt.Errorf("failed to delete group from memdb: %w", err) } return nil @@ -1518,7 +1518,7 @@ func (i *IdentityStore) MemDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clon groupRaw, err := txn.First(groupsTable, "id", groupID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch group from memdb using group ID: {{err}}", err) + return nil, fmt.Errorf("failed to fetch group from memdb using group ID: %w", err) } if groupRaw == nil { @@ -1554,7 +1554,7 @@ func (i *IdentityStore) MemDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGr groupsIter, err := txn.Get(groupsTable, "parent_group_ids", memberGroupID) if err != nil { - return nil, errwrap.Wrapf("failed to lookup groups using member group ID: {{err}}", err) + return nil, fmt.Errorf("failed to lookup groups using member group ID: %w", err) } var groups []*identity.Group @@ -1596,7 +1596,7 @@ func (i *IdentityStore) MemDBGroupsByMemberEntityIDInTxn(txn *memdb.Txn, entityI groupsIter, err := txn.Get(groupsTable, "member_entity_ids", entityID) if err != nil { - return nil, errwrap.Wrapf("failed to lookup groups using entity ID: {{err}}", err) + return nil, fmt.Errorf("failed to lookup groups using entity ID: %w", err) } var groups []*identity.Group @@ -1842,7 +1842,7 @@ func (i *IdentityStore) MemDBGroupsByBucketKeyInTxn(txn *memdb.Txn, bucketKey st groupsIter, err := txn.Get(groupsTable, "bucket_key", bucketKey) if err != nil { - return nil, errwrap.Wrapf("failed to lookup groups using bucket entry key hash: {{err}}", err) + return nil, fmt.Errorf("failed to lookup groups using bucket entry key hash: %w", err) } var groups []*identity.Group @@ -2058,7 +2058,7 @@ func (i *IdentityStore) handleAliasListCommon(ctx context.Context, groupAlias bo iter, err := txn.Get(tableName, "namespace_id", ns.ID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch iterator for aliases in memdb: {{err}}", err) + return nil, fmt.Errorf("failed to fetch iterator for aliases in memdb: %w", err) } ws.Add(iter.WatchCh()) diff --git a/vault/init.go b/vault/init.go index 3afde6b81ec27..64f1c6a9b1689 100644 --- a/vault/init.go +++ b/vault/init.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/vault/seal" - "github.com/hashicorp/errwrap" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/pgpkeys" @@ -125,7 +124,7 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) { // Generate a master key masterKey, err := c.barrier.GenerateKey(c.secureRandomReader) if err != nil { - return nil, nil, errwrap.Wrapf("key generation failed: {{err}}", err) + return nil, nil, fmt.Errorf("key generation failed: %w", err) } // Return the master key if only a single key part is used @@ -136,7 +135,7 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) { // Split the master key using the Shamir algorithm shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold) if err != nil { - return nil, nil, errwrap.Wrapf("failed to generate barrier shares: {{err}}", err) + return nil, nil, 
fmt.Errorf("failed to generate barrier shares: %w", err) } unsealKeys = shares } @@ -212,14 +211,14 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Check if the seal configuration is valid if err := recoveryConfig.Validate(); err != nil { c.logger.Error("invalid recovery configuration", "error", err) - return nil, errwrap.Wrapf("invalid recovery configuration: {{err}}", err) + return nil, fmt.Errorf("invalid recovery configuration: %w", err) } } // Check if the seal configuration is valid if err := barrierConfig.Validate(); err != nil { c.logger.Error("invalid seal configuration", "error", err) - return nil, errwrap.Wrapf("invalid seal configuration: {{err}}", err) + return nil, fmt.Errorf("invalid seal configuration: %w", err) } // Avoid an initialization race @@ -256,7 +255,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes err = c.seal.Init(ctx) if err != nil { c.logger.Error("failed to initialize seal", "error", err) - return nil, errwrap.Wrapf("error initializing seal: {{err}}", err) + return nil, fmt.Errorf("error initializing seal: %w", err) } initPTCleanup := initPTFunc(c) @@ -283,7 +282,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Initialize the barrier if err := c.barrier.Initialize(ctx, barrierKey, sealKey, c.secureRandomReader); err != nil { c.logger.Error("failed to initialize barrier", "error", err) - return nil, errwrap.Wrapf("failed to initialize barrier: {{err}}", err) + return nil, fmt.Errorf("failed to initialize barrier: %w", err) } if c.logger.IsInfo() { c.logger.Info("security barrier initialized", "stored", barrierConfig.StoredShares, "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold) @@ -292,7 +291,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Unseal the barrier if err := c.barrier.Unseal(ctx, barrierKey); err != nil { c.logger.Error("failed to unseal barrier", "error", err) - return nil, errwrap.Wrapf("failed to unseal barrier: {{err}}", err) + return nil, fmt.Errorf("failed to unseal barrier: %w", err) } // Ensure the barrier is re-sealed @@ -308,7 +307,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes err = c.seal.SetBarrierConfig(ctx, barrierConfig) if err != nil { c.logger.Error("failed to save barrier configuration", "error", err) - return nil, errwrap.Wrapf("barrier configuration saving failed: {{err}}", err) + return nil, fmt.Errorf("barrier configuration saving failed: %w", err) } results := &InitResult{ @@ -322,18 +321,18 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes keysToStore := [][]byte{barrierKey} if err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(sealKey); err != nil { c.logger.Error("failed to set seal key", "error", err) - return nil, errwrap.Wrapf("failed to set seal key: {{err}}", err) + return nil, fmt.Errorf("failed to set seal key: %w", err) } if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { c.logger.Error("failed to store keys", "error", err) - return nil, errwrap.Wrapf("failed to store keys: {{err}}", err) + return nil, fmt.Errorf("failed to store keys: %w", err) } results.SecretShares = sealKeyShares case seal.StoredKeysSupportedGeneric: keysToStore := [][]byte{barrierKey} if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { c.logger.Error("failed to store keys", "error", err) - return nil, errwrap.Wrapf("failed to store 
keys: {{err}}", err) + return nil, fmt.Errorf("failed to store keys: %w", err) } default: // We don't support initializing an old-style Shamir seal anymore, so @@ -365,7 +364,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes err = c.seal.SetRecoveryConfig(ctx, recoveryConfig) if err != nil { c.logger.Error("failed to save recovery configuration", "error", err) - return nil, errwrap.Wrapf("recovery configuration saving failed: {{err}}", err) + return nil, fmt.Errorf("recovery configuration saving failed: %w", err) } if recoveryConfig.SecretShares > 0 { @@ -456,7 +455,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { c.Logger().Info("stored unseal keys supported, attempting fetch") keys, err := c.seal.GetStoredKeys(ctx) if err != nil { - return NewNonFatalError(errwrap.Wrapf("fetching stored unseal keys failed: {{err}}", err)) + return NewNonFatalError(fmt.Errorf("fetching stored unseal keys failed: %w", err)) } // This usually happens when auto-unseal is configured, but the servers have @@ -470,7 +469,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { err = c.unsealInternal(ctx, keys[0]) if err != nil { - return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err)) + return NewNonFatalError(fmt.Errorf("unseal with stored key failed: %w", err)) } if c.Sealed() { diff --git a/vault/keyring.go b/vault/keyring.go index e2ea9441fa2f3..7db04a7c8650f 100644 --- a/vault/keyring.go +++ b/vault/keyring.go @@ -6,7 +6,6 @@ import ( "fmt" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/jsonutil" ) @@ -73,7 +72,7 @@ func (k *Key) Serialize() ([]byte, error) { func DeserializeKey(buf []byte) (*Key, error) { k := new(Key) if err := jsonutil.DecodeJSON(buf, k); err != nil { - return nil, errwrap.Wrapf("deserialization failed: {{err}}", err) + return nil, fmt.Errorf("deserialization failed: %w", err) } return k, nil } @@ -206,7 +205,7 @@ func DeserializeKeyring(buf []byte) (*Keyring, error) { // Deserialize the keyring var enc EncodedKeyring if err := jsonutil.DecodeJSON(buf, &enc); err != nil { - return nil, errwrap.Wrapf("deserialization failed: {{err}}", err) + return nil, fmt.Errorf("deserialization failed: %w", err) } // Create a new keyring diff --git a/vault/logical_cubbyhole.go b/vault/logical_cubbyhole.go index 719be7552716f..51d5e81a3c9e0 100644 --- a/vault/logical_cubbyhole.go +++ b/vault/logical_cubbyhole.go @@ -6,7 +6,6 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" @@ -98,7 +97,7 @@ func (b *CubbyholeBackend) revoke(ctx context.Context, view *BarrierView, salted func (b *CubbyholeBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { out, err := req.Storage.Get(ctx, req.ClientToken+"/"+req.Path) if err != nil { - return false, errwrap.Wrapf("existence check failed: {{err}}", err) + return false, fmt.Errorf("existence check failed: %w", err) } return out != nil, nil @@ -118,7 +117,7 @@ func (b *CubbyholeBackend) handleRead(ctx context.Context, req *logical.Request, // Read the path out, err := req.Storage.Get(ctx, req.ClientToken+"/"+path) if err != nil { - return nil, errwrap.Wrapf("read failed: {{err}}", err) + return nil, fmt.Errorf("read failed: %w", err) } // Fast-path the no data case @@ -129,7 +128,7 @@ func (b *CubbyholeBackend) handleRead(ctx 
context.Context, req *logical.Request, // Decode the data var rawData map[string]interface{} if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil { - return nil, errwrap.Wrapf("json decoding failed: {{err}}", err) + return nil, fmt.Errorf("json decoding failed: %w", err) } // Generate the response @@ -158,7 +157,7 @@ func (b *CubbyholeBackend) handleWrite(ctx context.Context, req *logical.Request // JSON encode the data buf, err := json.Marshal(req.Data) if err != nil { - return nil, errwrap.Wrapf("json encoding failed: {{err}}", err) + return nil, fmt.Errorf("json encoding failed: %w", err) } // Write out a new key @@ -170,7 +169,7 @@ func (b *CubbyholeBackend) handleWrite(ctx context.Context, req *logical.Request entry.SealWrap = true } if err := req.Storage.Put(ctx, entry); err != nil { - return nil, errwrap.Wrapf("failed to write: {{err}}", err) + return nil, fmt.Errorf("failed to write: %w", err) } return nil, nil diff --git a/vault/logical_passthrough.go b/vault/logical_passthrough.go index 23d854dc27a02..c04bcd4ab01ff 100644 --- a/vault/logical_passthrough.go +++ b/vault/logical_passthrough.go @@ -6,7 +6,6 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/parseutil" @@ -95,7 +94,7 @@ func (b *PassthroughBackend) handleRevoke(ctx context.Context, req *logical.Requ func (b *PassthroughBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { out, err := req.Storage.Get(ctx, req.Path) if err != nil { - return false, errwrap.Wrapf("existence check failed: {{err}}", err) + return false, fmt.Errorf("existence check failed: %w", err) } return out != nil, nil @@ -105,7 +104,7 @@ func (b *PassthroughBackend) handleRead(ctx context.Context, req *logical.Reques // Read the path out, err := req.Storage.Get(ctx, req.Path) if err != nil { - return nil, errwrap.Wrapf("read failed: {{err}}", err) + return nil, fmt.Errorf("read failed: %w", err) } // Fast-path the no data case @@ -117,7 +116,7 @@ func (b *PassthroughBackend) handleRead(ctx context.Context, req *logical.Reques var rawData map[string]interface{} if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil { - return nil, errwrap.Wrapf("json decoding failed: {{err}}", err) + return nil, fmt.Errorf("json decoding failed: %w", err) } var resp *logical.Response @@ -180,7 +179,7 @@ func (b *PassthroughBackend) handleWrite(ctx context.Context, req *logical.Reque // JSON encode the data buf, err := json.Marshal(req.Data) if err != nil { - return nil, errwrap.Wrapf("json encoding failed: {{err}}", err) + return nil, fmt.Errorf("json encoding failed: %w", err) } // Write out a new key @@ -189,7 +188,7 @@ func (b *PassthroughBackend) handleWrite(ctx context.Context, req *logical.Reque Value: buf, } if err := req.Storage.Put(ctx, entry); err != nil { - return nil, errwrap.Wrapf("failed to write: {{err}}", err) + return nil, fmt.Errorf("failed to write: %w", err) } return nil, nil diff --git a/vault/logical_system.go b/vault/logical_system.go index c2d3f2653f518..fe743cf6541c2 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -633,7 +633,7 @@ func (b *SystemBackend) handleRekeyRetrieve( recovery bool) (*logical.Response, error) { backup, err := b.Core.RekeyRetrieveBackup(ctx, recovery) if err != nil { - return nil, errwrap.Wrapf("unable to look up backed-up keys: {{err}}", err) + return nil, fmt.Errorf("unable to look 
up backed-up keys: %w", err) } if backup == nil { return logical.ErrorResponse("no backed-up keys found"), nil @@ -648,7 +648,7 @@ func (b *SystemBackend) handleRekeyRetrieve( } key, err := hex.DecodeString(j) if err != nil { - return nil, errwrap.Wrapf("error decoding hex-encoded backup key: {{err}}", err) + return nil, fmt.Errorf("error decoding hex-encoded backup key: %w", err) } currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key)) keysB64[k] = currB64Keys @@ -684,7 +684,7 @@ func (b *SystemBackend) handleRekeyDelete( recovery bool) (*logical.Response, error) { err := b.Core.RekeyDeleteBackup(ctx, recovery) if err != nil { - return nil, errwrap.Wrapf("error during deletion of backed-up keys: {{err}}", err) + return nil, fmt.Errorf("error during deletion of backed-up keys: %w", err) } return nil, nil @@ -1511,11 +1511,11 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, // enabled. If the vkv backend suports downgrading this can be removed. meVersion, err := parseutil.ParseInt(mountEntry.Options["version"]) if err != nil { - return nil, errwrap.Wrapf("unable to parse mount entry: {{err}}", err) + return nil, fmt.Errorf("unable to parse mount entry: %w", err) } optVersion, err := parseutil.ParseInt(v) if err != nil { - return handleError(errwrap.Wrapf("unable to parse options: {{err}}", err)) + return handleError(fmt.Errorf("unable to parse options: %w", err)) } // Only accept valid versions @@ -2715,7 +2715,7 @@ func (b *SystemBackend) handleWrappingUnwrap(ctx context.Context, req *logical.R httpResp := &logical.HTTPResponse{} err = jsonutil.DecodeJSON([]byte(response), httpResp) if err != nil { - return nil, errwrap.Wrapf("error decoding wrapped response: {{err}}", err) + return nil, fmt.Errorf("error decoding wrapped response: %w", err) } if httpResp.Data != nil && (httpResp.Data[logical.HTTPStatusCode] != nil || @@ -2769,7 +2769,7 @@ func (b *SystemBackend) responseWrappingUnwrap(ctx context.Context, te *logical. // Use the token to decrement the use count to avoid a second operation on the token. _, err := b.Core.tokenStore.UseTokenByID(ctx, tokenID) if err != nil { - return "", errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err) + return "", fmt.Errorf("error decrementing wrapping token's use-count: %w", err) } defer b.Core.tokenStore.revokeOrphan(ctx, tokenID) @@ -2783,7 +2783,7 @@ func (b *SystemBackend) responseWrappingUnwrap(ctx context.Context, te *logical. 
cubbyReq.SetTokenEntry(te) cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) if err != nil { - return "", errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + return "", fmt.Errorf("error looking up wrapping information: %w", err) } if cubbyResp == nil { return "no information found; wrapping token may be from a previous Vault version", ErrInternalError @@ -2979,7 +2979,7 @@ func (b *SystemBackend) handleWrappingLookup(ctx context.Context, req *logical.R cubbyReq.SetTokenEntry(te) cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) if err != nil { - return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + return nil, fmt.Errorf("error looking up wrapping information: %w", err) } if cubbyResp == nil { return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil @@ -3001,7 +3001,7 @@ func (b *SystemBackend) handleWrappingLookup(ctx context.Context, req *logical.R if creationTTLRaw != nil { creationTTL, err := creationTTLRaw.(json.Number).Int64() if err != nil { - return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err) + return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %w", err) } resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds() } @@ -3046,7 +3046,7 @@ func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.R // Use the token to decrement the use count to avoid a second operation on the token. _, err := b.Core.tokenStore.UseTokenByID(ctx, token) if err != nil { - return nil, errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err) + return nil, fmt.Errorf("error decrementing wrapping token's use-count: %w", err) } defer b.Core.tokenStore.revokeOrphan(ctx, token) } @@ -3060,7 +3060,7 @@ func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.R cubbyReq.SetTokenEntry(te) cubbyResp, err := b.Core.router.Route(ctx, cubbyReq) if err != nil { - return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err) + return nil, fmt.Errorf("error looking up wrapping information: %w", err) } if cubbyResp == nil { return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil @@ -3079,7 +3079,7 @@ func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.R } creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64() if err != nil { - return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err) + return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %w", err) } // Get creation_path to return as the response later @@ -3098,7 +3098,7 @@ func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.R cubbyReq.SetTokenEntry(te) cubbyResp, err = b.Core.router.Route(ctx, cubbyReq) if err != nil { - return nil, errwrap.Wrapf("error looking up response: {{err}}", err) + return nil, fmt.Errorf("error looking up response: %w", err) } if cubbyResp == nil { return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil diff --git a/vault/plugin_catalog.go b/vault/plugin_catalog.go index d52c1ac379ed9..71d4603e6e195 100644 --- a/vault/plugin_catalog.go +++ b/vault/plugin_catalog.go @@ -10,7 +10,6 @@ import ( "strings" "sync" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" multierror 
"github.com/hashicorp/go-multierror" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" @@ -157,13 +156,13 @@ func (c *PluginCatalog) UpgradePlugins(ctx context.Context, logger log.Logger) e for _, pluginName := range plugins { pluginRaw, err := c.catalogView.Get(ctx, pluginName) if err != nil { - retErr = multierror.Append(errwrap.Wrapf("failed to load plugin entry: {{err}}", err)) + retErr = multierror.Append(fmt.Errorf("failed to load plugin entry: %w", err)) continue } plugin := new(pluginutil.PluginRunner) if err := jsonutil.DecodeJSON(pluginRaw.Value, plugin); err != nil { - retErr = multierror.Append(errwrap.Wrapf("failed to decode plugin entry: {{err}}", err)) + retErr = multierror.Append(fmt.Errorf("failed to decode plugin entry: %w", err)) continue } @@ -215,20 +214,20 @@ func (c *PluginCatalog) get(ctx context.Context, name string, pluginType consts. // Look for external plugins in the barrier out, err := c.catalogView.Get(ctx, pluginType.String()+"/"+name) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err) + return nil, fmt.Errorf("failed to retrieve plugin %q: %w", name, err) } if out == nil { // Also look for external plugins under what their name would have been if they // were registered before plugin types existed. out, err = c.catalogView.Get(ctx, name) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err) + return nil, fmt.Errorf("failed to retrieve plugin %q: %w", name, err) } } if out != nil { entry := new(pluginutil.PluginRunner) if err := jsonutil.DecodeJSON(out.Value, entry); err != nil { - return nil, errwrap.Wrapf("failed to decode plugin entry: {{err}}", err) + return nil, fmt.Errorf("failed to decode plugin entry: %w", err) } if entry.Type != pluginType && entry.Type != consts.PluginTypeUnknown { return nil, nil @@ -279,11 +278,11 @@ func (c *PluginCatalog) setInternal(ctx context.Context, name string, pluginType commandFull := filepath.Join(c.directory, command) sym, err := filepath.EvalSymlinks(commandFull) if err != nil { - return errwrap.Wrapf("error while validating the command path: {{err}}", err) + return fmt.Errorf("error while validating the command path: %w", err) } symAbs, err := filepath.Abs(filepath.Dir(sym)) if err != nil { - return errwrap.Wrapf("error while validating the command path: {{err}}", err) + return fmt.Errorf("error while validating the command path: %w", err) } if symAbs != c.directory { @@ -324,7 +323,7 @@ func (c *PluginCatalog) setInternal(ctx context.Context, name string, pluginType buf, err := json.Marshal(entry) if err != nil { - return errwrap.Wrapf("failed to encode plugin entry: {{err}}", err) + return fmt.Errorf("failed to encode plugin entry: %w", err) } logicalEntry := logical.StorageEntry{ @@ -332,7 +331,7 @@ func (c *PluginCatalog) setInternal(ctx context.Context, name string, pluginType Value: buf, } if err := c.catalogView.Put(ctx, &logicalEntry); err != nil { - return errwrap.Wrapf("failed to persist plugin entry: {{err}}", err) + return fmt.Errorf("failed to persist plugin entry: %w", err) } return nil } diff --git a/vault/plugin_reload.go b/vault/plugin_reload.go index 0d94623e7c010..bfc4555866134 100644 --- a/vault/plugin_reload.go +++ b/vault/plugin_reload.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" @@ 
-47,7 +46,7 @@ func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) err := c.reloadBackendCommon(ctx, entry, isAuth) if err != nil { - errors = multierror.Append(errors, errwrap.Wrapf(fmt.Sprintf("cannot reload plugin on %q: {{err}}", mount), err)) + errors = multierror.Append(errors, fmt.Errorf("cannot reload plugin on %q: %w", mount, err)) continue } c.logger.Info("successfully reloaded plugin", "plugin", entry.Accessor, "path", entry.Path) diff --git a/vault/policy.go b/vault/policy.go index 6f6ec8b558a1f..e87372933e186 100644 --- a/vault/policy.go +++ b/vault/policy.go @@ -6,7 +6,6 @@ import ( "strings" "time" - "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" @@ -233,7 +232,7 @@ func parseACLPolicyWithTemplating(ns *namespace.Namespace, rules string, perform // Parse the rules root, err := hcl.Parse(rules) if err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } // Top-level item should be the object list @@ -248,7 +247,7 @@ func parseACLPolicyWithTemplating(ns *namespace.Namespace, rules string, perform "path", } if err := hclutil.CheckHCLKeys(list, valid); err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } // Create the initial policy and store the raw text of the rules @@ -258,12 +257,12 @@ func parseACLPolicyWithTemplating(ns *namespace.Namespace, rules string, perform namespace: ns, } if err := hcl.DecodeObject(&p, list); err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } if o := list.Filter("path"); len(o.Items) > 0 { if err := parsePaths(&p, o, performTemplating, entity, groups); err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } } @@ -298,7 +297,7 @@ func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, en String: key, }) if err != nil { - return errwrap.Wrapf("failed to validate policy templating: {{err}}", err) + return fmt.Errorf("failed to validate policy templating: %w", err) } if hasTemplating { result.Templated = true @@ -406,14 +405,14 @@ func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, en if pc.MinWrappingTTLHCL != nil { dur, err := parseutil.ParseDurationSecond(pc.MinWrappingTTLHCL) if err != nil { - return errwrap.Wrapf("error parsing min_wrapping_ttl: {{err}}", err) + return fmt.Errorf("error parsing min_wrapping_ttl: %w", err) } pc.Permissions.MinWrappingTTL = dur } if pc.MaxWrappingTTLHCL != nil { dur, err := parseutil.ParseDurationSecond(pc.MaxWrappingTTLHCL) if err != nil { - return errwrap.Wrapf("error parsing max_wrapping_ttl: {{err}}", err) + return fmt.Errorf("error parsing max_wrapping_ttl: %w", err) } pc.Permissions.MaxWrappingTTL = dur } @@ -428,7 +427,7 @@ func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, en if pc.ControlGroupHCL.TTL != nil { dur, err := parseutil.ParseDurationSecond(pc.ControlGroupHCL.TTL) if err != nil { - return errwrap.Wrapf("error parsing control group max ttl: {{err}}", err) + return fmt.Errorf("error parsing control group max ttl: %w", err) } pc.Permissions.ControlGroup.TTL = dur } diff --git a/vault/policy_store.go b/vault/policy_store.go index 33f8cf0b6d351..9798d186f9677 
100644 --- a/vault/policy_store.go +++ b/vault/policy_store.go @@ -9,7 +9,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/helper/identity" @@ -373,7 +372,7 @@ func (ps *PolicyStore) setPolicyInternal(ctx context.Context, p *Policy) error { sentinelPolicy: p.sentinelPolicy, }) if err != nil { - return errwrap.Wrapf("failed to create entry: {{err}}", err) + return fmt.Errorf("failed to create entry: %w", err) } // Construct the cache key @@ -384,14 +383,14 @@ func (ps *PolicyStore) setPolicyInternal(ctx context.Context, p *Policy) error { rgpView := ps.getRGPView(p.namespace) rgp, err := rgpView.Get(ctx, entry.Key) if err != nil { - return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err) + return fmt.Errorf("failed looking up conflicting policy: %w", err) } if rgp != nil { return fmt.Errorf("cannot reuse policy names between ACLs and RGPs") } if err := view.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to persist policy: {{err}}", err) + return fmt.Errorf("failed to persist policy: %w", err) } ps.policyTypeMap.Store(index, PolicyTypeACL) @@ -404,7 +403,7 @@ func (ps *PolicyStore) setPolicyInternal(ctx context.Context, p *Policy) error { aclView := ps.getACLView(p.namespace) acl, err := aclView.Get(ctx, entry.Key) if err != nil { - return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err) + return fmt.Errorf("failed looking up conflicting policy: %w", err) } if acl != nil { return fmt.Errorf("cannot reuse policy names between ACLs and RGPs") @@ -525,7 +524,7 @@ func (ps *PolicyStore) switchedGetPolicy(ctx context.Context, name string, polic out, err := view.Get(ctx, name) if err != nil { - return nil, errwrap.Wrapf("failed to read policy: {{err}}", err) + return nil, fmt.Errorf("failed to read policy: %w", err) } if out == nil { @@ -536,7 +535,7 @@ func (ps *PolicyStore) switchedGetPolicy(ctx context.Context, name string, polic policy := new(Policy) err = out.DecodeJSON(policyEntry) if err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } // Set these up here so that they're available for loading into @@ -552,7 +551,7 @@ func (ps *PolicyStore) switchedGetPolicy(ctx context.Context, name string, polic // Parse normally p, err := ParseACLPolicy(ns, policyEntry.Raw) if err != nil { - return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err) + return nil, fmt.Errorf("failed to parse policy: %w", err) } policy.Paths = p.Paths @@ -687,7 +686,7 @@ func (ps *PolicyStore) switchedDeletePolicy(ctx context.Context, name string, po if physicalDeletion { err := view.Delete(ctx, name) if err != nil { - return errwrap.Wrapf("failed to delete policy: {{err}}", err) + return fmt.Errorf("failed to delete policy: %w", err) } } @@ -702,7 +701,7 @@ func (ps *PolicyStore) switchedDeletePolicy(ctx context.Context, name string, po if physicalDeletion { err := view.Delete(ctx, name) if err != nil { - return errwrap.Wrapf("failed to delete policy: {{err}}", err) + return fmt.Errorf("failed to delete policy: %w", err) } } @@ -719,7 +718,7 @@ func (ps *PolicyStore) switchedDeletePolicy(ctx context.Context, name string, po if physicalDeletion { err := view.Delete(ctx, name) if err != nil { - return errwrap.Wrapf("failed to delete policy: {{err}}", err) + return fmt.Errorf("failed to delete policy: %w", err) } } @@ -765,7 +764,7 @@ 
func (ps *PolicyStore) ACL(ctx context.Context, entity *identity.Entity, policyN for _, nsPolicyName := range nsPolicyNames { p, err := ps.GetPolicy(policyCtx, nsPolicyName, PolicyTypeToken) if err != nil { - return nil, errwrap.Wrapf("failed to get policy: {{err}}", err) + return nil, fmt.Errorf("failed to get policy: %w", err) } if p != nil { policies = append(policies, p) @@ -782,14 +781,14 @@ func (ps *PolicyStore) ACL(ctx context.Context, entity *identity.Entity, policyN if entity != nil { directGroups, inheritedGroups, err := ps.core.identityStore.groupsByEntityID(entity.ID) if err != nil { - return nil, errwrap.Wrapf("failed to fetch group memberships: {{err}}", err) + return nil, fmt.Errorf("failed to fetch group memberships: %w", err) } groups = append(directGroups, inheritedGroups...) } } p, err := parseACLPolicyWithTemplating(policy.namespace, policy.Raw, true, entity, groups) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error parsing templated policy %q: {{err}}", policy.Name), err) + return nil, fmt.Errorf("error parsing templated policy %q: %w", policy.Name, err) } p.Name = policy.Name policies[i] = p @@ -799,7 +798,7 @@ func (ps *PolicyStore) ACL(ctx context.Context, entity *identity.Entity, policyN // Construct the ACL acl, err := NewACL(ctx, policies) if err != nil { - return nil, errwrap.Wrapf("failed to construct ACL: {{err}}", err) + return nil, fmt.Errorf("failed to construct ACL: %w", err) } return acl, nil @@ -822,7 +821,7 @@ func (ps *PolicyStore) loadACLPolicyInternal(ctx context.Context, policyName, po // Check if the policy already exists policy, err := ps.GetPolicy(ctx, policyName, PolicyTypeACL) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("error fetching %s policy from store: {{err}}", policyName), err) + return fmt.Errorf("error fetching %s policy from store: %w", policyName, err) } if policy != nil { if !strutil.StrListContains(immutablePolicies, policyName) || policyText == policy.Raw { @@ -832,7 +831,7 @@ func (ps *PolicyStore) loadACLPolicyInternal(ctx context.Context, policyName, po policy, err = ParseACLPolicy(ns, policyText) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("error parsing %s policy: {{err}}", policyName), err) + return fmt.Errorf("error parsing %s policy: %w", policyName, err) } if policy == nil { diff --git a/vault/raft.go b/vault/raft.go index f663d854d50cf..ab7932005982a 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -13,7 +13,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-discover" discoverk8s "github.com/hashicorp/go-discover/provider/k8s" @@ -237,13 +236,13 @@ func (c *Core) raftTLSRotateDirect(ctx context.Context, logger hclog.Logger, sto // Create a new key raftTLSKey, err := raft.GenerateTLSKey(c.secureRandomReader) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to generate new raft TLS key: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to generate new raft TLS key: %w", err) } // Read the existing keyring keyring, err := c.raftReadTLSKeyring(ctx) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to read raft TLS keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to read raft TLS keyring: %w", err) } // Advance the term and store the new key, replacing the old one. 
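The raft and seal hunks below wrap storage and TLS-keyring failures the same way. As a complement to the errors.Is sketch above, here is a hedged example (with a hypothetical StorageError type, not Vault's) showing that %w also cooperates with errors.As when a caller needs the concrete error type behind several layers of wrapping:

package main

import (
	"errors"
	"fmt"
)

// StorageError is a hypothetical typed error, used only for illustration.
type StorageError struct {
	Path string
}

func (e *StorageError) Error() string {
	return fmt.Sprintf("storage failure at %s", e.Path)
}

// putKeyring stands in for a barrier write that fails.
func putKeyring() error {
	return &StorageError{Path: "core/raft/tls"}
}

func rotateKeyring() error {
	if err := putKeyring(); err != nil {
		return fmt.Errorf("failed to write keyring: %w", err)
	}
	return nil
}

func main() {
	err := fmt.Errorf("raft TLS rotation failed: %w", rotateKeyring())

	// errors.As walks the %w chain and recovers the typed error.
	var sErr *StorageError
	if errors.As(err, &sErr) {
		fmt.Println("failed path:", sErr.Path) // failed path: core/raft/tls
	}
}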
@@ -256,10 +255,10 @@ func (c *Core) raftTLSRotateDirect(ctx context.Context, logger hclog.Logger, sto keyring.ActiveKeyID = raftTLSKey.ID entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to json encode keyring: %w", err) } if err := c.barrier.Put(ctx, entry); err != nil { - return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to write keyring: %w", err) } logger.Info("wrote new raft TLS config") @@ -354,7 +353,7 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf // Read the existing keyring keyring, err := c.raftReadTLSKeyring(ctx) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to read raft TLS keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to read raft TLS keyring: %w", err) } switch { @@ -365,10 +364,10 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf keyring.AppliedIndex = raftBackend.AppliedIndex() entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to json encode keyring: %w", err) } if err := c.barrier.Put(ctx, entry); err != nil { - return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to write keyring: %w", err) } case len(keyring.Keys) > 1: @@ -386,7 +385,7 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf // Create a new key raftTLSKey, err := raft.GenerateTLSKey(c.secureRandomReader) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to generate new raft TLS key: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to generate new raft TLS key: %w", err) } // Advance the term and store the new key @@ -394,10 +393,10 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf keyring.Keys = append(keyring.Keys, raftTLSKey) entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to json encode keyring: %w", err) } if err := c.barrier.Put(ctx, entry); err != nil { - return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to write keyring: %w", err) } // Write the keyring again with the new applied index. 
This allows us to @@ -406,10 +405,10 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf keyring.AppliedIndex = raftBackend.AppliedIndex() entry, err = logical.StorageEntryJSON(raftTLSStoragePath, keyring) if err != nil { - return time.Time{}, errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to json encode keyring: %w", err) } if err := c.barrier.Put(ctx, entry); err != nil { - return time.Time{}, errwrap.Wrapf("failed to write keyring: {{err}}", err) + return time.Time{}, fmt.Errorf("failed to write keyring: %w", err) } logger.Info("wrote new raft TLS config") @@ -423,7 +422,7 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf checkCommitted := func() error { keyring, err := c.raftReadTLSKeyring(ctx) if err != nil { - return errwrap.Wrapf("failed to read raft TLS keyring: {{err}}", err) + return fmt.Errorf("failed to read raft TLS keyring: %w", err) } switch { @@ -444,15 +443,15 @@ func (c *Core) raftTLSRotatePhased(ctx context.Context, logger hclog.Logger, raf keyring.Term += 1 entry, err := logical.StorageEntryJSON(raftTLSStoragePath, keyring) if err != nil { - return errwrap.Wrapf("failed to json encode keyring: {{err}}", err) + return fmt.Errorf("failed to json encode keyring: %w", err) } if err := c.barrier.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to write keyring: {{err}}", err) + return fmt.Errorf("failed to write keyring: %w", err) } // Update the TLS Key in the backend if err := raftBackend.SetTLSKeyring(keyring); err != nil { - return errwrap.Wrapf("failed to install keyring: {{err}}", err) + return fmt.Errorf("failed to install keyring: %w", err) } logger.Info("installed new raft TLS key", "term", keyring.Term) @@ -719,7 +718,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo init, err := c.InitializedLocally(ctx) if err != nil { - return false, errwrap.Wrapf("failed to check if core is initialized: {{err}}", err) + return false, fmt.Errorf("failed to check if core is initialized: %w", err) } isRaftHAOnly := c.isRaftHAOnly() @@ -775,7 +774,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo var adv activeAdvertisement err = jsonutil.DecodeJSON(entry.Value, &adv) if err != nil { - return false, errwrap.Wrapf("unable to decoded leader entry: {{err}}", err) + return false, fmt.Errorf("unable to decoded leader entry: %w", err) } leaderInfos[0].LeaderAPIAddr = adv.RedirectAddr @@ -783,7 +782,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo disco, err := newDiscover() if err != nil { - return false, errwrap.Wrapf("failed to create auto-join discovery: {{err}}", err) + return false, fmt.Errorf("failed to create auto-join discovery: %w", err) } join := func(retry bool) error { @@ -797,7 +796,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo init, err := c.InitializedLocally(ctx) if err != nil { - return errwrap.Wrapf("failed to check if core is initialized: {{err}}", err) + return fmt.Errorf("failed to check if core is initialized: %w", err) } if init && !isRaftHAOnly { @@ -813,21 +812,21 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo if leaderInfo.TLSConfig == nil && (len(leaderInfo.LeaderCACert) != 0 || len(leaderInfo.LeaderClientCert) != 0 || len(leaderInfo.LeaderClientKey) != 0) { leaderInfo.TLSConfig, err = tlsutil.ClientTLSConfig([]byte(leaderInfo.LeaderCACert), 
[]byte(leaderInfo.LeaderClientCert), []byte(leaderInfo.LeaderClientKey)) if err != nil { - return errwrap.Wrapf("failed to create TLS config: {{err}}", err) + return fmt.Errorf("failed to create TLS config: %w", err) } leaderInfo.TLSConfig.ServerName = leaderInfo.LeaderTLSServerName } if leaderInfo.TLSConfig == nil && leaderInfo.LeaderTLSServerName != "" { leaderInfo.TLSConfig, err = tlsutil.SetupTLSConfig(map[string]string{"address": leaderInfo.LeaderTLSServerName}, "") if err != nil { - return errwrap.Wrapf("failed to create TLS config: {{err}}", err) + return fmt.Errorf("failed to create TLS config: %w", err) } } if leaderInfo.TLSConfig != nil { transport.TLSClientConfig = leaderInfo.TLSConfig.Clone() if err := http2.ConfigureTransport(transport); err != nil { - return errwrap.Wrapf("failed to configure TLS: {{err}}", err) + return fmt.Errorf("failed to configure TLS: %w", err) } } @@ -837,7 +836,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo config := api.DefaultConfig() if config.Error != nil { - return errwrap.Wrapf("failed to create api client: {{err}}", config.Error) + return fmt.Errorf("failed to create api client: %w", config.Error) } config.Address = leaderAddr @@ -846,7 +845,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo apiClient, err := api.NewClient(config) if err != nil { - return errwrap.Wrapf("failed to create api client: {{err}}", err) + return fmt.Errorf("failed to create api client: %w", err) } // Attempt to join the leader by requesting for the bootstrap challenge @@ -854,7 +853,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo "server_id": raftBackend.NodeID(), }) if err != nil { - return errwrap.Wrapf("error during raft bootstrap init call: {{err}}", err) + return fmt.Errorf("error during raft bootstrap init call: %w", err) } if secret == nil { return errors.New("could not retrieve raft bootstrap package") @@ -876,12 +875,12 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo } challengeRaw, err := base64.StdEncoding.DecodeString(challengeB64.(string)) if err != nil { - return errwrap.Wrapf("error decoding raft bootstrap challenge: {{err}}", err) + return fmt.Errorf("error decoding raft bootstrap challenge: %w", err) } eBlob := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(challengeRaw, eBlob); err != nil { - return errwrap.Wrapf("error decoding raft bootstrap challenge: {{err}}", err) + return fmt.Errorf("error decoding raft bootstrap challenge: %w", err) } raftInfo := &raftInformation{ @@ -912,7 +911,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo } if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), raftInfo); err != nil { - return errwrap.Wrapf("failed to send answer to raft leader node: {{err}}", err) + return fmt.Errorf("failed to send answer to raft leader node: %w", err) } if c.seal.BarrierType() == wrapping.Shamir && !isRaftHAOnly { @@ -1014,7 +1013,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo default: if err := join(false); err != nil { c.logger.Error("failed to join raft cluster", "error", err) - return false, errwrap.Wrapf("failed to join raft cluster: {{err}}", err) + return false, fmt.Errorf("failed to join raft cluster: %w", err) } } @@ -1061,12 +1060,12 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access, plaintext, err := sealAccess.Decrypt(ctx, raftInfo.challenge, nil) if err != nil { 
- return errwrap.Wrapf("error decrypting challenge: {{err}}", err) + return fmt.Errorf("error decrypting challenge: %w", err) } parsedClusterAddr, err := url.Parse(c.ClusterAddr()) if err != nil { - return errwrap.Wrapf("error parsing cluster address: {{err}}", err) + return fmt.Errorf("error parsing cluster address: %w", err) } clusterAddr := parsedClusterAddr.Host if atomic.LoadUint32(&TestingUpdateClusterAddr) == 1 && strings.HasSuffix(clusterAddr, ":0") { @@ -1108,7 +1107,7 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access, err = c.startClusterListener(ctx) if err != nil { - return errwrap.Wrapf("error starting cluster: {{err}}", err) + return fmt.Errorf("error starting cluster: %w", err) } raftBackend.SetRestoreCallback(c.raftSnapshotRestoreCallback(true, true)) @@ -1118,7 +1117,7 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access, } err = raftBackend.SetupCluster(ctx, opts) if err != nil { - return errwrap.Wrapf("failed to setup raft cluster: {{err}}", err) + return fmt.Errorf("failed to setup raft cluster: %w", err) } return nil @@ -1158,7 +1157,7 @@ func (c *Core) RaftBootstrap(ctx context.Context, onInit bool) error { parsedClusterAddr, err := url.Parse(c.ClusterAddr()) if err != nil { - return errwrap.Wrapf("error parsing cluster address: {{err}}", err) + return fmt.Errorf("error parsing cluster address: %w", err) } if err := raftBackend.Bootstrap([]raft.Peer{ { @@ -1166,7 +1165,7 @@ func (c *Core) RaftBootstrap(ctx context.Context, onInit bool) error { Address: parsedClusterAddr.Host, }, }); err != nil { - return errwrap.Wrapf("could not bootstrap clustered storage: {{err}}", err) + return fmt.Errorf("could not bootstrap clustered storage: %w", err) } raftOpts := raft.SetupOpts{ @@ -1177,7 +1176,7 @@ func (c *Core) RaftBootstrap(ctx context.Context, onInit bool) error { // Generate the TLS Keyring info for SetupCluster to consume raftTLS, err := c.raftCreateTLSKeyring(ctx) if err != nil { - return errwrap.Wrapf("could not generate TLS keyring during bootstrap: {{err}}", err) + return fmt.Errorf("could not generate TLS keyring during bootstrap: %w", err) } raftBackend.SetRestoreCallback(c.raftSnapshotRestoreCallback(true, true)) @@ -1187,7 +1186,7 @@ func (c *Core) RaftBootstrap(ctx context.Context, onInit bool) error { } if err := raftBackend.SetupCluster(ctx, raftOpts); err != nil { - return errwrap.Wrapf("could not start clustered storage: {{err}}", err) + return fmt.Errorf("could not start clustered storage: %w", err) } return nil diff --git a/vault/rekey.go b/vault/rekey.go index 38912324bdbc8..4ce8638567641 100644 --- a/vault/rekey.go +++ b/vault/rekey.go @@ -9,7 +9,6 @@ import ( "fmt" "net/http" - "github.com/hashicorp/errwrap" wrapping "github.com/hashicorp/go-kms-wrapping" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-uuid" @@ -85,7 +84,7 @@ func (c *Core) RekeyThreshold(ctx context.Context, recovery bool) (int, logical. 
config, err = c.seal.BarrierConfig(ctx) } if err != nil { - return 0, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("unable to look up config: {{err}}", err).Error()) + return 0, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("unable to look up config: %w", err).Error()) } if config == nil { return 0, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error()) @@ -205,7 +204,7 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) logical.HTTPCodedError { // Check if the seal configuration is valid if err := config.Validate(); err != nil { c.logger.Error("invalid rekey seal configuration", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid rekey seal configuration: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("invalid rekey seal configuration: %w", err).Error()) } c.stateLock.RLock() @@ -232,7 +231,7 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) logical.HTTPCodedError { nonce, err := uuid.GenerateUUID() if err != nil { c.barrierRekeyConfig = nil - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error generating nonce for procedure: %w", err).Error()) } c.barrierRekeyConfig.Nonce = nonce @@ -251,7 +250,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) logical.HTTPCodedError { // Check if the seal configuration is valid if err := config.Validate(); err != nil { c.logger.Error("invalid recovery configuration", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid recovery configuration: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("invalid recovery configuration: %w", err).Error()) } if !c.seal.RecoveryKeySupported() { @@ -282,7 +281,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) logical.HTTPCodedError { nonce, err := uuid.GenerateUUID() if err != nil { c.recoveryRekeyConfig = nil - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error generating nonce for procedure: %w", err).Error()) } c.recoveryRekeyConfig.Nonce = nonce @@ -340,7 +339,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) existingConfig, err = c.seal.BarrierConfig(ctx) } if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing config: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to fetch existing config: %w", err).Error()) } // Ensure the barrier is initialized if existingConfig == nil { @@ -387,7 +386,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) recoveredKey, err = shamir.Combine(c.barrierRekeyConfig.RekeyProgress) c.barrierRekeyConfig.RekeyProgress = nil if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute master key: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to compute master key: %w", err).Error()) } } @@ -395,7 +394,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) case useRecovery: if err := 
c.seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil { c.logger.Error("rekey recovery key verification failed", "error", err) - return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusBadRequest, fmt.Errorf("recovery key verification failed: %w", err).Error()) } case c.seal.BarrierType() == wrapping.Shamir: if c.seal.StoredKeysSupported() == seal.StoredKeysSupportedShamirMaster { @@ -407,22 +406,22 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) testseal.SetCore(c) err = testseal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup unseal key: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to setup unseal key: %w", err).Error()) } cfg, err := c.seal.BarrierConfig(ctx) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to setup test barrier config: %w", err).Error()) } testseal.SetCachedBarrierConfig(cfg) stored, err := testseal.GetStoredKeys(ctx) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to read master key: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to read master key: %w", err).Error()) } recoveredKey = stored[0] } if err := c.barrier.VerifyMaster(recoveredKey); err != nil { c.logger.Error("master key verification failed", "error", err) - return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("master key verification failed: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusBadRequest, fmt.Errorf("master key verification failed: %w", err).Error()) } } @@ -432,7 +431,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) newKey, err := c.barrier.GenerateKey(c.secureRandomReader) if err != nil { c.logger.Error("failed to generate master key", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("master key generation failed: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("master key generation failed: %w", err).Error()) } results := &RekeyResult{ @@ -448,7 +447,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) shares, err := shamir.Split(newKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold) if err != nil { c.logger.Error("failed to generate shares", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate shares: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to generate shares: %w", err).Error()) } results.SecretShares = shares } @@ -462,7 +461,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) } results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.barrierRekeyConfig.PGPKeys) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error()) + return 
nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to encrypt shares: %w", err).Error()) } // If backup is enabled, store backup info in vault.coreBarrierUnsealKeysBackupPath @@ -484,7 +483,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) buf, err := json.Marshal(backupVals) if err != nil { c.logger.Error("failed to marshal unseal key backup", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal unseal key backup: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to marshal unseal key backup: %w", err).Error()) } pe := &physical.Entry{ Key: coreBarrierUnsealKeysBackupPath, @@ -492,7 +491,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) } if err = c.physical.Put(ctx, pe); err != nil { c.logger.Error("failed to save unseal key backup", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save unseal key backup: %w", err).Error()) } } } @@ -502,7 +501,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) nonce, err := uuid.GenerateUUID() if err != nil { c.barrierRekeyConfig = nil - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to generate verification nonce: %w", err).Error()) } c.barrierRekeyConfig.VerificationNonce = nonce c.barrierRekeyConfig.VerificationKey = newKey @@ -513,7 +512,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) } if err := c.performBarrierRekey(ctx, newKey); err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform barrier rekey: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to perform barrier rekey: %w", err).Error()) } c.barrierRekeyConfig = nil @@ -526,7 +525,7 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic // We won't be able to call SetStoredKeys without setting StoredShares=1. 
existingConfig, err := c.seal.BarrierConfig(ctx) if err != nil { - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing config: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to fetch existing config: %w", err).Error()) } existingConfig.StoredShares = 1 c.seal.SetCachedBarrierConfig(existingConfig) @@ -535,23 +534,23 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic if c.seal.StoredKeysSupported() != seal.StoredKeysSupportedGeneric { err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(newSealKey) if err != nil { - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to update barrier seal key: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to update barrier seal key: %w", err).Error()) } } newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader) if err != nil { - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform rekey: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to perform rekey: %w", err).Error()) } if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil { c.logger.Error("failed to store keys", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to store keys: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to store keys: %w", err).Error()) } // Rekey the barrier if err := c.barrier.Rekey(ctx, newMasterKey); err != nil { c.logger.Error("failed to rekey barrier", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to rekey barrier: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to rekey barrier: %w", err).Error()) } if c.logger.IsInfo() { c.logger.Info("security barrier rekeyed", "stored", c.barrierRekeyConfig.StoredShares, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) @@ -564,7 +563,7 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic }) if err != nil { c.logger.Error("failed to store new seal key", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to store new seal key: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to store new seal key: %w", err).Error()) } } @@ -572,7 +571,7 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic if err := c.seal.SetBarrierConfig(ctx, c.barrierRekeyConfig); err != nil { c.logger.Error("error saving rekey seal configuration", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save rekey seal configuration: %w", err).Error()) } // Write to the canary path, which will force a synchronous truing during @@ -582,7 +581,7 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic Value: []byte(c.barrierRekeyConfig.Nonce), }); err != nil { c.logger.Error("error saving keyring canary", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to 
save keyring canary: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save keyring canary: %w", err).Error()) } c.barrierRekeyConfig.RekeyProgress = nil @@ -618,7 +617,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string // Get the seal configuration existingConfig, err := c.seal.RecoveryConfig(ctx) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing recovery config: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to fetch existing recovery config: %w", err).Error()) } // Ensure the seal is initialized if existingConfig == nil { @@ -665,21 +664,21 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string recoveryKey, err = shamir.Combine(c.recoveryRekeyConfig.RekeyProgress) c.recoveryRekeyConfig.RekeyProgress = nil if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute recovery key: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to compute recovery key: %w", err).Error()) } } // Verify the recovery key if err := c.seal.VerifyRecoveryKey(ctx, recoveryKey); err != nil { c.logger.Error("recovery key verification failed", "error", err) - return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusBadRequest, fmt.Errorf("recovery key verification failed: %w", err).Error()) } // Generate a new master key newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader) if err != nil { c.logger.Error("failed to generate recovery key", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("recovery key generation failed: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("recovery key generation failed: %w", err).Error()) } // Return the master key if only a single key part is used @@ -694,7 +693,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold) if err != nil { c.logger.Error("failed to generate shares", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate shares: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to generate shares: %w", err).Error()) } results.SecretShares = shares } @@ -706,7 +705,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string } results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.recoveryRekeyConfig.PGPKeys) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to encrypt shares: %w", err).Error()) } if c.recoveryRekeyConfig.Backup { @@ -727,7 +726,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string buf, err := json.Marshal(backupVals) if err != nil { c.logger.Error("failed to marshal recovery key backup", "error", err) - return nil, 
logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal recovery key backup: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to marshal recovery key backup: %w", err).Error()) } pe := &physical.Entry{ Key: coreRecoveryUnsealKeysBackupPath, @@ -735,7 +734,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string } if err = c.physical.Put(ctx, pe); err != nil { c.logger.Error("failed to save unseal key backup", "error", err) - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save unseal key backup: %w", err).Error()) } } } @@ -746,7 +745,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string nonce, err := uuid.GenerateUUID() if err != nil { c.recoveryRekeyConfig = nil - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to generate verification nonce: %w", err).Error()) } c.recoveryRekeyConfig.VerificationNonce = nonce c.recoveryRekeyConfig.VerificationKey = newMasterKey @@ -757,7 +756,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string } if err := c.performRecoveryRekey(ctx, newMasterKey); err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery rekey: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to perform recovery rekey: %w", err).Error()) } c.recoveryRekeyConfig = nil @@ -767,14 +766,14 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string func (c *Core) performRecoveryRekey(ctx context.Context, newMasterKey []byte) logical.HTTPCodedError { if err := c.seal.SetRecoveryKey(ctx, newMasterKey); err != nil { c.logger.Error("failed to set recovery key", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to set recovery key: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to set recovery key: %w", err).Error()) } c.recoveryRekeyConfig.VerificationKey = nil if err := c.seal.SetRecoveryConfig(ctx, c.recoveryRekeyConfig); err != nil { c.logger.Error("error saving rekey seal configuration", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save rekey seal configuration: %w", err).Error()) } // Write to the canary path, which will force a synchronous truing during @@ -784,7 +783,7 @@ func (c *Core) performRecoveryRekey(ctx context.Context, newMasterKey []byte) lo Value: []byte(c.recoveryRekeyConfig.Nonce), }); err != nil { c.logger.Error("error saving keyring canary", "error", err) - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save keyring canary: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to save keyring canary: %w", err).Error()) } c.recoveryRekeyConfig.RekeyProgress = nil @@ -876,7 +875,7 @@ func (c *Core) RekeyVerify(ctx context.Context, key 
[]byte, nonce string, recove var err error recoveredKey, err = shamir.Combine(config.VerificationProgress) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute key for verification: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to compute key for verification: %w", err).Error()) } } @@ -888,12 +887,12 @@ func (c *Core) RekeyVerify(ctx context.Context, key []byte, nonce string, recove switch recovery { case false: if err := c.performBarrierRekey(ctx, recoveredKey); err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform rekey: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to perform rekey: %w", err).Error()) } c.barrierRekeyConfig = nil default: if err := c.performRecoveryRekey(ctx, recoveredKey); err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery key rekey: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("failed to perform recovery key rekey: %w", err).Error()) } c.recoveryRekeyConfig = nil } @@ -988,7 +987,7 @@ func (c *Core) RekeyRetrieveBackup(ctx context.Context, recovery bool) (*RekeyBa entry, err = c.physical.Get(ctx, coreBarrierUnsealKeysBackupPath) } if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error getting keys from backup: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error getting keys from backup: %w", err).Error()) } if entry == nil { return nil, nil @@ -997,7 +996,7 @@ func (c *Core) RekeyRetrieveBackup(ctx context.Context, recovery bool) (*RekeyBa ret := &RekeyBackup{} err = jsonutil.DecodeJSON(entry.Value, ret) if err != nil { - return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error decoding backup keys: {{err}}", err).Error()) + return nil, logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error decoding backup keys: %w", err).Error()) } return ret, nil @@ -1018,13 +1017,13 @@ func (c *Core) RekeyDeleteBackup(ctx context.Context, recovery bool) logical.HTT if recovery { err := c.physical.Delete(ctx, coreRecoveryUnsealKeysBackupPath) if err != nil { - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error deleting backup keys: %w", err).Error()) } return nil } err := c.physical.Delete(ctx, coreBarrierUnsealKeysBackupPath) if err != nil { - return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error()) + return logical.CodedError(http.StatusInternalServerError, fmt.Errorf("error deleting backup keys: %w", err).Error()) } return nil } diff --git a/vault/request_handling.go b/vault/request_handling.go index 4e8643b919c19..2dfdf6b73b287 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -419,7 +419,7 @@ func (c *Core) switchedLockHandleRequest(httpCtx context.Context, req *logical.R ns, err := namespace.FromContext(httpCtx) if err != nil { cancel() - return nil, errwrap.Wrapf("could not parse namespace from http context: {{err}}", err) + return nil, fmt.Errorf("could not parse namespace from http context: %w", err) } ctx = 
namespace.ContextWithNamespace(ctx, ns) @@ -754,7 +754,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp c.logger.Trace("request rejected due to lease count quota violation", "request_path", req.Path) } - retErr = multierror.Append(retErr, errwrap.Wrapf(fmt.Sprintf("request path %q: {{err}}", req.Path), quotas.ErrLeaseCountQuotaExceeded)) + retErr = multierror.Append(retErr, fmt.Errorf("request path %q: %w", req.Path, quotas.ErrLeaseCountQuotaExceeded)) return nil, auth, retErr } @@ -1152,7 +1152,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re c.logger.Trace("request rejected due to lease count quota violation", "request_path", req.Path) } - retErr = multierror.Append(retErr, errwrap.Wrapf(fmt.Sprintf("request path %q: {{err}}", req.Path), quotas.ErrLeaseCountQuotaExceeded)) + retErr = multierror.Append(retErr, fmt.Errorf("request path %q: %w", req.Path, quotas.ErrLeaseCountQuotaExceeded)) return } diff --git a/vault/seal.go b/vault/seal.go index 3bd0cc80b41aa..6ef7da7ee97d1 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/vault/sdk/physical" "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/vault/seal" "github.com/keybase/go-crypto/openpgp" @@ -178,7 +177,7 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { pe, err := d.core.physical.Get(ctx, barrierSealConfigPath) if err != nil { d.core.logger.Error("failed to read seal configuration", "error", err) - return nil, errwrap.Wrapf("failed to check seal configuration: {{err}}", err) + return nil, fmt.Errorf("failed to check seal configuration: %w", err) } // If the seal configuration is missing, we are not initialized @@ -192,7 +191,7 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { // Decode the barrier entry if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil { d.core.logger.Error("failed to decode seal configuration", "error", err) - return nil, errwrap.Wrapf("failed to decode seal configuration: {{err}}", err) + return nil, fmt.Errorf("failed to decode seal configuration: %w", err) } switch conf.Type { @@ -208,7 +207,7 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { // Check for a valid seal configuration if err := conf.Validate(); err != nil { d.core.logger.Error("invalid seal configuration", "error", err) - return nil, errwrap.Wrapf("seal validation failed: {{err}}", err) + return nil, fmt.Errorf("seal validation failed: %w", err) } d.SetCachedBarrierConfig(&conf) @@ -239,7 +238,7 @@ func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) // Encode the seal configuration buf, err := json.Marshal(config) if err != nil { - return errwrap.Wrapf("failed to encode seal configuration: {{err}}", err) + return fmt.Errorf("failed to encode seal configuration: %w", err) } // Store the seal configuration @@ -250,7 +249,7 @@ func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) if err := d.core.physical.Put(ctx, pe); err != nil { d.core.logger.Error("failed to write seal configuration", "error", err) - return errwrap.Wrapf("failed to write seal configuration: {{err}}", err) + return fmt.Errorf("failed to write seal configuration: %w", err) } d.SetCachedBarrierConfig(config.Clone()) @@ -370,11 +369,11 @@ func (s *SealConfig) Validate() error { for _, keystring := range s.PGPKeys { 
data, err := base64.StdEncoding.DecodeString(keystring) if err != nil { - return errwrap.Wrapf("error decoding given PGP key: {{err}}", err) + return fmt.Errorf("error decoding given PGP key: %w", err) } _, err = openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data))) if err != nil { - return errwrap.Wrapf("error parsing given PGP key: {{err}}", err) + return fmt.Errorf("error parsing given PGP key: %w", err) } } } @@ -443,18 +442,18 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *s buf, err := json.Marshal(keys) if err != nil { - return errwrap.Wrapf("failed to encode keys for storage: {{err}}", err) + return fmt.Errorf("failed to encode keys for storage: %w", err) } // Encrypt and marshal the keys blobInfo, err := encryptor.Encrypt(ctx, buf, nil) if err != nil { - return &ErrEncrypt{Err: errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)} + return &ErrEncrypt{Err: fmt.Errorf("failed to encrypt keys for storage: %w", err)} } value, err := proto.Marshal(blobInfo) if err != nil { - return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err) + return fmt.Errorf("failed to marshal value for storage: %w", err) } // Store the seal configuration. @@ -464,7 +463,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *s } if err := storage.Put(ctx, pe); err != nil { - return errwrap.Wrapf("failed to write keys to storage: {{err}}", err) + return fmt.Errorf("failed to write keys to storage: %w", err) } return nil @@ -473,7 +472,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *s func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor *seal.Access) ([][]byte, error) { pe, err := storage.Get(ctx, StoredBarrierKeysPath) if err != nil { - return nil, errwrap.Wrapf("failed to fetch stored keys: {{err}}", err) + return nil, fmt.Errorf("failed to fetch stored keys: %w", err) } // This is not strictly an error; we may not have any stored keys, for @@ -484,12 +483,12 @@ func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor *se blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { - return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) + return nil, fmt.Errorf("failed to proto decode stored keys: %w", err) } pt, err := encryptor.Decrypt(ctx, blobInfo, nil) if err != nil { - return nil, &ErrDecrypt{Err: errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)} + return nil, &ErrDecrypt{Err: fmt.Errorf("failed to encrypt keys for storage: %w", err)} } // Decode the barrier entry diff --git a/vault/seal_autoseal.go b/vault/seal_autoseal.go index 5f38582b0d8c3..a037bc866ced7 100644 --- a/vault/seal_autoseal.go +++ b/vault/seal_autoseal.go @@ -8,7 +8,6 @@ import ( "sync/atomic" proto "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/physical" @@ -101,7 +100,7 @@ func (d *autoSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) { func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { pe, err := d.core.physical.Get(ctx, StoredBarrierKeysPath) if err != nil { - return errwrap.Wrapf("failed to fetch stored keys: {{err}}", err) + return fmt.Errorf("failed to fetch stored keys: %w", err) } if pe == nil { return fmt.Errorf("no stored keys found") @@ -109,7 +108,7 @@ func (d *autoSeal) upgradeStoredKeys(ctx context.Context) 
error { blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { - return errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) + return fmt.Errorf("failed to proto decode stored keys: %w", err) } if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() { @@ -117,17 +116,17 @@ func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { - return errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) + return fmt.Errorf("failed to decrypt encrypted stored keys: %w", err) } // Decode the barrier entry var keys [][]byte if err := json.Unmarshal(pt, &keys); err != nil { - return errwrap.Wrapf("failed to decode stored keys: {{err}}", err) + return fmt.Errorf("failed to decode stored keys: %w", err) } if err := d.SetStoredKeys(ctx, keys); err != nil { - return errwrap.Wrapf("failed to save upgraded stored keys: {{err}}", err) + return fmt.Errorf("failed to save upgraded stored keys: %w", err) } } return nil @@ -167,7 +166,7 @@ func (d *autoSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { entry, err := d.core.physical.Get(ctx, barrierSealConfigPath) if err != nil { d.logger.Error("failed to read seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal configuration: {{err}}", sealType), err) + return nil, fmt.Errorf("failed to read %q seal configuration: %w", sealType, err) } // If the seal configuration is missing, we are not initialized @@ -182,13 +181,13 @@ func (d *autoSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { err = json.Unmarshal(entry.Value, conf) if err != nil { d.logger.Error("failed to decode seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err) + return nil, fmt.Errorf("failed to decode %q seal configuration: %w", sealType, err) } // Check for a valid seal configuration if err := conf.Validate(); err != nil { d.logger.Error("invalid seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err) + return nil, fmt.Errorf("%q seal validation failed: %w", sealType, err) } barrierTypeUpgradeCheck(d.BarrierType(), conf) @@ -217,7 +216,7 @@ func (d *autoSeal) SetBarrierConfig(ctx context.Context, conf *SealConfig) error // Encode the seal configuration buf, err := json.Marshal(conf) if err != nil { - return errwrap.Wrapf("failed to encode barrier seal configuration: {{err}}", err) + return fmt.Errorf("failed to encode barrier seal configuration: %w", err) } // Store the seal configuration @@ -228,7 +227,7 @@ func (d *autoSeal) SetBarrierConfig(ctx context.Context, conf *SealConfig) error if err := d.core.physical.Put(ctx, pe); err != nil { d.logger.Error("failed to write barrier seal configuration", "error", err) - return errwrap.Wrapf("failed to write barrier seal configuration: {{err}}", err) + return fmt.Errorf("failed to write barrier seal configuration: %w", err) } d.SetCachedBarrierConfig(conf.Clone()) @@ -261,7 +260,7 @@ func (d *autoSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) { entry, err = d.core.physical.Get(ctx, recoverySealConfigPlaintextPath) if err != nil { d.logger.Error("failed to read seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal 
configuration: {{err}}", sealType), err) + return nil, fmt.Errorf("failed to read %q seal configuration: %w", sealType, err) } if entry == nil { @@ -274,7 +273,7 @@ func (d *autoSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) { // return the correct seal config be, err := d.core.barrier.Get(ctx, recoverySealConfigPath) if err != nil { - return nil, errwrap.Wrapf("failed to read old recovery seal configuration: {{err}}", err) + return nil, fmt.Errorf("failed to read old recovery seal configuration: %w", err) } // If the seal configuration is missing, then we are not initialized. @@ -295,13 +294,13 @@ func (d *autoSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) { conf := &SealConfig{} if err := json.Unmarshal(entry.Value, conf); err != nil { d.logger.Error("failed to decode seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err) + return nil, fmt.Errorf("failed to decode %q seal configuration: %w", sealType, err) } // Check for a valid seal configuration if err := conf.Validate(); err != nil { d.logger.Error("invalid seal configuration", "seal_type", sealType, "error", err) - return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err) + return nil, fmt.Errorf("%q seal validation failed: %w", sealType, err) } if conf.Type != d.RecoveryType() { @@ -335,7 +334,7 @@ func (d *autoSeal) SetRecoveryConfig(ctx context.Context, conf *SealConfig) erro // Encode the seal configuration buf, err := json.Marshal(conf) if err != nil { - return errwrap.Wrapf("failed to encode recovery seal configuration: {{err}}", err) + return fmt.Errorf("failed to encode recovery seal configuration: %w", err) } // Store the seal configuration directly in the physical storage @@ -346,7 +345,7 @@ func (d *autoSeal) SetRecoveryConfig(ctx context.Context, conf *SealConfig) erro if err := d.core.physical.Put(ctx, pe); err != nil { d.logger.Error("failed to write recovery seal configuration", "error", err) - return errwrap.Wrapf("failed to write recovery seal configuration: {{err}}", err) + return fmt.Errorf("failed to write recovery seal configuration: %w", err) } d.recoveryConfig.Store(conf.Clone()) @@ -387,12 +386,12 @@ func (d *autoSeal) SetRecoveryKey(ctx context.Context, key []byte) error { // Encrypt and marshal the keys blobInfo, err := d.Encrypt(ctx, key, nil) if err != nil { - return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err) + return fmt.Errorf("failed to encrypt keys for storage: %w", err) } value, err := proto.Marshal(blobInfo) if err != nil { - return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err) + return fmt.Errorf("failed to marshal value for storage: %w", err) } be := &physical.Entry{ @@ -402,7 +401,7 @@ func (d *autoSeal) SetRecoveryKey(ctx context.Context, key []byte) error { if err := d.core.physical.Put(ctx, be); err != nil { d.logger.Error("failed to write recovery key", "error", err) - return errwrap.Wrapf("failed to write recovery key: {{err}}", err) + return fmt.Errorf("failed to write recovery key: %w", err) } return nil @@ -416,7 +415,7 @@ func (d *autoSeal) getRecoveryKeyInternal(ctx context.Context) ([]byte, error) { pe, err := d.core.physical.Get(ctx, recoveryKeyPath) if err != nil { d.logger.Error("failed to read recovery key", "error", err) - return nil, errwrap.Wrapf("failed to read recovery key: {{err}}", err) + return nil, fmt.Errorf("failed to read recovery key: %w", err) } if pe 
== nil { d.logger.Warn("no recovery key found") @@ -425,12 +424,12 @@ func (d *autoSeal) getRecoveryKeyInternal(ctx context.Context) ([]byte, error) { blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { - return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) + return nil, fmt.Errorf("failed to proto decode stored keys: %w", err) } pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { - return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) + return nil, fmt.Errorf("failed to decrypt encrypted stored keys: %w", err) } return pt, nil @@ -439,7 +438,7 @@ func (d *autoSeal) getRecoveryKeyInternal(ctx context.Context) ([]byte, error) { func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { pe, err := d.core.physical.Get(ctx, recoveryKeyPath) if err != nil { - return errwrap.Wrapf("failed to fetch recovery key: {{err}}", err) + return fmt.Errorf("failed to fetch recovery key: %w", err) } if pe == nil { return fmt.Errorf("no recovery key found") @@ -447,7 +446,7 @@ func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { - return errwrap.Wrapf("failed to proto decode recovery key: {{err}}", err) + return fmt.Errorf("failed to proto decode recovery key: %w", err) } if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() { @@ -455,10 +454,10 @@ func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { - return errwrap.Wrapf("failed to decrypt encrypted recovery key: {{err}}", err) + return fmt.Errorf("failed to decrypt encrypted recovery key: %w", err) } if err := d.SetRecoveryKey(ctx, pt); err != nil { - return errwrap.Wrapf("failed to save upgraded recovery key: {{err}}", err) + return fmt.Errorf("failed to save upgraded recovery key: %w", err) } } return nil @@ -471,7 +470,7 @@ func (d *autoSeal) migrateRecoveryConfig(ctx context.Context) error { // Get config from the old recoverySealConfigPath path be, err := d.core.barrier.Get(ctx, recoverySealConfigPath) if err != nil { - return errwrap.Wrapf("failed to read old recovery seal configuration during migration: {{err}}", err) + return fmt.Errorf("failed to read old recovery seal configuration during migration: %w", err) } // If this entry is nil, then skip migration @@ -490,12 +489,12 @@ func (d *autoSeal) migrateRecoveryConfig(ctx context.Context) error { } if err := d.core.physical.Put(ctx, pe); err != nil { - return errwrap.Wrapf("failed to write recovery seal configuration during migration: {{err}}", err) + return fmt.Errorf("failed to write recovery seal configuration during migration: %w", err) } // Perform deletion of the old entry if err := d.core.barrier.Delete(ctx, recoverySealConfigPath); err != nil { - return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err) + return fmt.Errorf("failed to delete old recovery seal configuration during migration: %w", err) } return nil diff --git a/vault/token_store.go b/vault/token_store.go index b83c535689fc6..e2f7c4edbae29 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -16,7 +16,6 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-radix" "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-sockaddr" @@ 
-787,12 +786,12 @@ func (ts *TokenStore) createAccessor(ctx context.Context, entry *logical.TokenEn aEntryBytes, err := jsonutil.EncodeJSON(aEntry) if err != nil { - return errwrap.Wrapf("failed to marshal accessor index entry: {{err}}", err) + return fmt.Errorf("failed to marshal accessor index entry: %w", err) } le := &logical.StorageEntry{Key: saltID, Value: aEntryBytes} if err := ts.accessorView(tokenNS).Put(ctx, le); err != nil { - return errwrap.Wrapf("failed to persist accessor index entry: {{err}}", err) + return fmt.Errorf("failed to persist accessor index entry: %w", err) } return nil } @@ -967,7 +966,7 @@ func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry // Marshal the entry enc, err := json.Marshal(entry) if err != nil { - return errwrap.Wrapf("failed to encode entry: {{err}}", err) + return fmt.Errorf("failed to encode entry: %w", err) } if writeSecondary { @@ -979,7 +978,7 @@ func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry // Ensure the parent exists parent, err := ts.Lookup(ctx, entry.Parent) if err != nil { - return errwrap.Wrapf("failed to lookup parent: {{err}}", err) + return fmt.Errorf("failed to lookup parent: %w", err) } if parent == nil { return fmt.Errorf("parent token not found") @@ -1008,7 +1007,7 @@ func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry le := &logical.StorageEntry{Key: path} if err := ts.parentView(parentNS).Put(ctx, le); err != nil { - return errwrap.Wrapf("failed to persist entry: {{err}}", err) + return fmt.Errorf("failed to persist entry: %w", err) } } } @@ -1019,7 +1018,7 @@ func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry le.SealWrap = true } if err := ts.idView(tokenNS).Put(ctx, le); err != nil { - return errwrap.Wrapf("failed to persist entry: {{err}}", err) + return fmt.Errorf("failed to persist entry: %w", err) } return nil } @@ -1055,7 +1054,7 @@ func (ts *TokenStore) UseToken(ctx context.Context, te *logical.TokenEntry) (*lo var err error te, err = ts.lookupInternal(ctx, te.ID, false, false) if err != nil { - return nil, errwrap.Wrapf("failed to refresh entry: {{err}}", err) + return nil, fmt.Errorf("failed to refresh entry: %w", err) } // If it can't be found we shouldn't be trying to use it, so if we get nil // back, it is because it has been revoked in the interim or will be @@ -1188,7 +1187,7 @@ func (ts *TokenStore) lookupBatchToken(ctx context.Context, id string) (*logical func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tainted bool) (*logical.TokenEntry, error) { ns, err := namespace.FromContext(ctx) if err != nil { - return nil, errwrap.Wrapf("failed to find namespace in context: {{err}}", err) + return nil, fmt.Errorf("failed to find namespace in context: %w", err) } // If it starts with "b." 
it's a batch token @@ -1206,7 +1205,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai if nsID != "" { tokenNS, err := NamespaceByID(ctx, nsID, ts.core) if err != nil { - return nil, errwrap.Wrapf("failed to look up namespace from the token: {{err}}", err) + return nil, fmt.Errorf("failed to look up namespace from the token: %w", err) } if tokenNS != nil { if tokenNS.ID != ns.ID { @@ -1230,7 +1229,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai raw, err = ts.idView(ns).Get(ctx, lookupID) if err != nil { - return nil, errwrap.Wrapf("failed to read entry: {{err}}", err) + return nil, fmt.Errorf("failed to read entry: %w", err) } // Bail if not found @@ -1241,7 +1240,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai // Unmarshal the token entry := new(logical.TokenEntry) if err := jsonutil.DecodeJSON(raw.Value, entry); err != nil { - return nil, errwrap.Wrapf("failed to decode entry: {{err}}", err) + return nil, fmt.Errorf("failed to decode entry: %w", err) } // This is a token that is awaiting deferred revocation or tainted @@ -1301,7 +1300,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai // If fields are getting upgraded, store the changes if persistNeeded { if err := ts.store(ctx, entry); err != nil { - return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err) + return nil, fmt.Errorf("failed to persist token upgrade: %w", err) } } return entry, nil @@ -1316,7 +1315,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai } le, err := ts.expiration.FetchLeaseTimesByToken(ctx, entry) if err != nil { - return nil, errwrap.Wrapf("failed to fetch lease times: {{err}}", err) + return nil, fmt.Errorf("failed to fetch lease times: %w", err) } var ret *logical.TokenEntry @@ -1354,7 +1353,7 @@ func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tai // If fields are getting upgraded, store the changes if persistNeeded { if err := ts.store(ctx, entry); err != nil { - return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err) + return nil, fmt.Errorf("failed to persist token upgrade: %w", err) } } @@ -1428,7 +1427,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO // before we return, we can remove the token store entry if ret == nil { if err := ts.idView(tokenNS).Delete(ctx, saltedID); err != nil { - ret = errwrap.Wrapf("failed to delete entry: {{err}}", err) + ret = fmt.Errorf("failed to delete entry: %w", err) } } @@ -1467,7 +1466,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO default: parentNS, err = NamespaceByID(ctx, parentNSID, ts.core) if err != nil { - return errwrap.Wrapf("failed to get parent namespace: {{err}}", err) + return fmt.Errorf("failed to get parent namespace: %w", err) } if parentNS == nil { return namespace.ErrNoNamespace @@ -1488,7 +1487,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO } if err = ts.parentView(parentNS).Delete(ctx, path); err != nil { - return errwrap.Wrapf("failed to delete entry: {{err}}", err) + return fmt.Errorf("failed to delete entry: %w", err) } } @@ -1500,7 +1499,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO } if err = ts.accessorView(tokenNS).Delete(ctx, accessorSaltedID); err != nil { - return errwrap.Wrapf("failed to delete entry: {{err}}", err) + return fmt.Errorf("failed 
to delete entry: %w", err) } } @@ -1514,7 +1513,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO // on child prefixes as there will be none (as saltedID entry is a leaf node). children, err := ts.parentView(tokenNS).List(ctx, saltedID+"/") if err != nil { - return errwrap.Wrapf("failed to scan for children: {{err}}", err) + return fmt.Errorf("failed to scan for children: %w", err) } for _, child := range children { var childNSID string @@ -1523,7 +1522,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO if childNSID != "" { childNS, err := NamespaceByID(ctx, childNSID, ts.core) if err != nil { - return errwrap.Wrapf("failed to get child token: {{err}}", err) + return fmt.Errorf("failed to get child token: %w", err) } if childNS == nil { return namespace.ErrNoNamespace @@ -1534,13 +1533,13 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO entry, err := ts.lookupInternal(childCtx, child, true, true) if err != nil { - return errwrap.Wrapf("failed to get child token: {{err}}", err) + return fmt.Errorf("failed to get child token: %w", err) } if entry == nil { // Seems it's already revoked, so nothing to do here except delete the index err = ts.parentView(tokenNS).Delete(ctx, child) if err != nil { - return errwrap.Wrapf("failed to delete child entry: {{err}}", err) + return fmt.Errorf("failed to delete child entry: %w", err) } continue } @@ -1552,7 +1551,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO err = ts.store(childCtx, entry) if err != nil { lock.Unlock() - return errwrap.Wrapf("failed to update child token: {{err}}", err) + return fmt.Errorf("failed to update child token: %w", err) } lock.Unlock() @@ -1562,7 +1561,7 @@ func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipO // of logical.ClearView err = ts.parentView(tokenNS).Delete(ctx, child) if err != nil { - return errwrap.Wrapf("failed to delete child entry: {{err}}", err) + return fmt.Errorf("failed to delete child entry: %w", err) } } } @@ -1633,7 +1632,7 @@ func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error { if saltedNSID != "" { saltedNS, err = NamespaceByID(ctx, saltedNSID, ts.core) if err != nil { - return errwrap.Wrapf("failed to find namespace for token revocation: {{err}}", err) + return fmt.Errorf("failed to find namespace for token revocation: %w", err) } saltedCtx = namespace.ContextWithNamespace(ctx, saltedNS) @@ -1642,7 +1641,7 @@ func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error { path := saltedID + "/" childrenRaw, err := ts.parentView(saltedNS).List(saltedCtx, path) if err != nil { - return errwrap.Wrapf("failed to scan for children: {{err}}", err) + return fmt.Errorf("failed to scan for children: %w", err) } // Filter the child list to remove any items that have ever been in the dfs stack. @@ -1653,7 +1652,7 @@ func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error { children = append(children, child) } else { if err = ts.parentView(saltedNS).Delete(saltedCtx, path+child); err != nil { - return errwrap.Wrapf("failed to delete entry: {{err}}", err) + return fmt.Errorf("failed to delete entry: %w", err) } ts.Logger().Warn("token cycle found", "token", child) @@ -1669,7 +1668,7 @@ func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error { // would have been deleted, and any pending leases for deletion will be restored // by the expiration manager. 
if err := ts.revokeInternal(saltedCtx, saltedID, true); err != nil { - return errwrap.Wrapf("failed to revoke entry: {{err}}", err) + return fmt.Errorf("failed to revoke entry: %w", err) } // If the length of l is equal to 1, then the last token has been deleted if l == 1 { @@ -1756,7 +1755,7 @@ func (ts *TokenStore) lookupByAccessor(ctx context.Context, id string, salted, t entry, err := ts.accessorView(ns).Get(ctx, lookupID) if err != nil { - return aEntry, errwrap.Wrapf("failed to read index using accessor: {{err}}", err) + return aEntry, fmt.Errorf("failed to read index using accessor: %w", err) } if entry == nil { return aEntry, &logical.StatusBadRequest{Err: "invalid accessor"} @@ -1767,7 +1766,7 @@ func (ts *TokenStore) lookupByAccessor(ctx context.Context, id string, salted, t if err != nil { te, err := ts.lookupInternal(ctx, string(entry.Value), false, tainted) if err != nil { - return accessorEntry{}, errwrap.Wrapf("failed to look up token using accessor index: {{err}}", err) + return accessorEntry{}, fmt.Errorf("failed to look up token using accessor index: %w", err) } // It's hard to reason about what to do here if te is nil -- it may be // that the token was revoked async, or that it's an old accessor index @@ -1799,7 +1798,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data ns, err := namespace.FromContext(ctx) if err != nil { - return nil, errwrap.Wrapf("failed to get namespace from context: {{err}}", err) + return nil, fmt.Errorf("failed to get namespace from context: %w", err) } go func() { @@ -1818,13 +1817,13 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // List out all the accessors saltedAccessorList, err := ts.accessorView(ns).List(quitCtx, "") if err != nil { - return errwrap.Wrapf("failed to fetch accessor index entries: {{err}}", err) + return fmt.Errorf("failed to fetch accessor index entries: %w", err) } // First, clean up secondary index entries that are no longer valid parentList, err := ts.parentView(ns).List(quitCtx, "") if err != nil { - return errwrap.Wrapf("failed to fetch secondary index entries: {{err}}", err) + return fmt.Errorf("failed to fetch secondary index entries: %w", err) } // List all the cubbyhole storage keys @@ -1836,7 +1835,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data cubbyholeKeys, err := bview.List(quitCtx, "") if err != nil { - return errwrap.Wrapf("failed to fetch cubbyhole storage keys: {{err}}", err) + return fmt.Errorf("failed to fetch cubbyhole storage keys: %w", err) } var countParentEntries, deletedCountParentEntries, countParentList, deletedCountParentList int64 @@ -1849,7 +1848,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // Get the children children, err := ts.parentView(ns).List(quitCtx, parent) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read secondary index: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read secondary index: %w", err)) continue } @@ -1884,7 +1883,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data te.Parent = "" err = ts.store(quitCtx, te) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to convert child token into an orphan token: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to convert child token into an orphan token: %w", err)) } lock.Unlock() continue @@ -1896,7 +1895,7 @@ 
func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data ts.logger.Debug("deleting invalid secondary index", "index", index) err = ts.parentView(ns).Delete(quitCtx, index) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete secondary index: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index: %w", err)) continue } deletedChildrenCount++ @@ -1933,7 +1932,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data accessorEntry, err := ts.lookupByAccessor(quitCtx, saltedAccessor, true, true) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read the accessor index: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor index: %w", err)) continue } @@ -1945,7 +1944,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // item since this is just a best-effort operation err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete the accessor index: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor index: %w", err)) continue } deletedCountAccessorEmptyToken++ @@ -1958,7 +1957,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // exist te, err := ts.lookupInternal(quitCtx, accessorEntry.TokenID, false, true) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup tainted ID: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup tainted ID: %w", err)) lock.RUnlock() continue } @@ -1985,7 +1984,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // the leases associated with the token. err = ts.expiration.RevokeByToken(quitCtx, tokenEntry) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to revoke leases of expired token: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke leases of expired token: %w", err)) continue } deletedCountInvalidTokenInAccessor++ @@ -1996,7 +1995,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // entry to try again. 
err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete accessor entry: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete accessor entry: %w", err)) continue } deletedCountAccessorInvalidToken++ @@ -2006,7 +2005,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data case te.NamespaceID == namespace.RootNamespaceID && !strings.HasPrefix(te.ID, "s."): saltedID, err := ts.SaltID(quitCtx, te.ID) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to create salted token id: {{err}}", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to create salted token id: %w", err)) continue } validCubbyholeKeys[salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash)] = true @@ -2033,7 +2032,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data ts.logger.Info("deleting invalid cubbyhole", "key", key) err = ts.cubbyholeBackend.revoke(quitCtx, bview, key) if err != nil { - tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke cubbyhole key %q: {{err}}", key), err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke cubbyhole key %q: %w", key, err)) } deletedCountInvalidCubbyholeKey++ } @@ -2215,7 +2214,7 @@ func (ts *TokenStore) handleCreateCommon(ctx context.Context, req *logical.Reque // Read the parent policy parent, err := ts.Lookup(ctx, req.ClientToken) if err != nil { - return nil, errwrap.Wrapf("parent token lookup failed: {{err}}", err) + return nil, fmt.Errorf("parent token lookup failed: %w", err) } if parent == nil { return logical.ErrorResponse("parent token lookup failed: no parent found"), logical.ErrInvalidRequest @@ -2897,7 +2896,7 @@ func (ts *TokenStore) handleRevokeOrphan(ctx context.Context, req *logical.Reque // running in the same namespace or a parent. 
te, err := ts.Lookup(ctx, id) if err != nil { - return nil, errwrap.Wrapf("error when looking up token to revoke: {{err}}", err) + return nil, fmt.Errorf("error when looking up token to revoke: %w", err) } if te == nil { return logical.ErrorResponse("token to revoke not found"), logical.ErrInvalidRequest @@ -3048,7 +3047,7 @@ func (ts *TokenStore) handleRenew(ctx context.Context, req *logical.Request, dat // Lookup the token te, err := ts.Lookup(ctx, id) if err != nil { - return nil, errwrap.Wrapf("error looking up token to renew: {{err}}", err) + return nil, fmt.Errorf("error looking up token to renew: %w", err) } if te == nil { return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest @@ -3073,7 +3072,7 @@ func (ts *TokenStore) authRenew(ctx context.Context, req *logical.Request, d *fr te, err := ts.Lookup(ctx, req.Auth.ClientToken) if err != nil { - return nil, errwrap.Wrapf("error looking up token: {{err}}", err) + return nil, fmt.Errorf("error looking up token: %w", err) } if te == nil { return nil, fmt.Errorf("no token entry found during lookup") @@ -3087,7 +3086,7 @@ func (ts *TokenStore) authRenew(ctx context.Context, req *logical.Request, d *fr role, err := ts.tokenStoreRole(ctx, te.Role) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error looking up role %q: {{err}}", te.Role), err) + return nil, fmt.Errorf("error looking up role %q: %w", te.Role, err) } if role == nil { return nil, fmt.Errorf("original token role %q could not be found, not renewing", te.Role) @@ -3305,7 +3304,7 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate(ctx context.Context, req *logic // Next parse token fields from the helper if err := entry.ParseTokenFields(req, data); err != nil { - return logical.ErrorResponse(errwrap.Wrapf("error parsing role fields: {{err}}", err).Error()), nil + return logical.ErrorResponse(fmt.Errorf("error parsing role fields: %w", err).Error()), nil } entry.TokenType = oldEntryTokenType @@ -3358,7 +3357,7 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate(ctx context.Context, req *logic if ok { boundCIDRs, err := parseutil.ParseAddrs(boundCIDRsRaw.([]string)) if err != nil { - return logical.ErrorResponse(errwrap.Wrapf("error parsing bound_cidrs: {{err}}", err).Error()), nil + return logical.ErrorResponse(fmt.Errorf("error parsing bound_cidrs: %w", err).Error()), nil } entry.BoundCIDRs = boundCIDRs entry.TokenBoundCIDRs = entry.BoundCIDRs diff --git a/vault/wrapping.go b/vault/wrapping.go index 6e67093d4cd1f..03026f7fe483c 100644 --- a/vault/wrapping.go +++ b/vault/wrapping.go @@ -11,7 +11,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -38,7 +37,7 @@ func (c *Core) ensureWrappingKey(ctx context.Context) error { if entry == nil { key, err := ecdsa.GenerateKey(elliptic.P521(), c.secureRandomReader) if err != nil { - return errwrap.Wrapf("failed to generate wrapping key: {{err}}", err) + return fmt.Errorf("failed to generate wrapping key: %w", err) } keyParams.D = key.D keyParams.X = key.X @@ -46,20 +45,20 @@ func (c *Core) ensureWrappingKey(ctx context.Context) error { keyParams.Type = corePrivateKeyTypeP521 val, err := jsonutil.EncodeJSON(keyParams) if err != nil { - return errwrap.Wrapf("failed to encode wrapping key: {{err}}", err) + return fmt.Errorf("failed to encode wrapping key: %w", err) } entry = &logical.StorageEntry{ Key: coreWrappingJWTKeyPath, Value: val, } 
if err = c.barrier.Put(ctx, entry); err != nil { - return errwrap.Wrapf("failed to store wrapping key: {{err}}", err) + return fmt.Errorf("failed to store wrapping key: %w", err) } } // Redundant if we just created it, but in this case serves as a check anyways if err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil { - return errwrap.Wrapf("failed to decode wrapping key parameters: {{err}}", err) + return fmt.Errorf("failed to decode wrapping key parameters: %w", err) } c.wrappingJWTKey = &ecdsa.PrivateKey{ @@ -406,12 +405,12 @@ func (c *Core) ValidateWrappingToken(ctx context.Context, req *logical.Request) // Implement the jose library way parsedJWT, err := squarejwt.ParseSigned(token) if err != nil { - return false, errwrap.Wrapf("wrapping token could not be parsed: {{err}}", err) + return false, fmt.Errorf("wrapping token could not be parsed: %w", err) } var claims squarejwt.Claims allClaims := make(map[string]interface{}) if err = parsedJWT.Claims(&c.wrappingJWTKey.PublicKey, &claims, &allClaims); err != nil { - return false, errwrap.Wrapf("wrapping token signature could not be validated: {{err}}", err) + return false, fmt.Errorf("wrapping token signature could not be validated: %w", err) } typeClaimRaw, ok := allClaims["type"] if !ok { From d22749f557fc6382176198a3dbb0513852e318a6 Mon Sep 17 00:00:00 2001 From: Kyle MacDonald Date: Tue, 11 May 2021 14:17:11 -0400 Subject: [PATCH 004/101] website: add redirect for /trial (#11587) --- website/redirects.next.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/redirects.next.js b/website/redirects.next.js index 0331ab29058cf..f04a4361254a9 100644 --- a/website/redirects.next.js +++ b/website/redirects.next.js @@ -1,4 +1,9 @@ module.exports = [ + { + source: '/trial', + destination: 'https://www.hashicorp.com/products/vault/trial', + permanent: true, + }, { source: '/intro', destination: '/intro/getting-started', From 85474879fcd29ab5e847966eebd76c8745b1a3ad Mon Sep 17 00:00:00 2001 From: swayne275 Date: Tue, 11 May 2021 14:04:06 -0600 Subject: [PATCH 005/101] Vault 2303: Count irrevocable leases in quotas/metrics and other improvements (#11542) * shrink generic error message * move zombie loading to updatePendingInternal from loadEntryInternal * probably the right metric/lease behavior for irrevocable leases... 
* comment improvements * test total lease count with valid and irrevocable leases --- vault/expiration.go | 113 ++++++++++++++++++++++----------------- vault/expiration_test.go | 100 ++++++++++++++++++++++++++++++++-- 2 files changed, 162 insertions(+), 51 deletions(-) diff --git a/vault/expiration.go b/vault/expiration.go index 4c3f28f5f819a..cee48ae15fe84 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -70,11 +70,9 @@ const ( // storage/memory maxIrrevocableErrorLength = 240 - genericIrrevocableErrorMessage = "no error message given" -) + genericIrrevocableErrorMessage = "unknown" -var ( - errOutOfRetries = errors.New("out of retries") + outOfRetriesMessage = "out of retries" ) type pendingInfo struct { @@ -239,7 +237,7 @@ func (r *revocationJob) OnFailure(err error) { r.m.logger.Trace("marking lease as irrevocable", "lease_id", r.leaseID, "error", err) if pending.revokesAttempted >= maxRevokeAttempts { r.m.logger.Trace("lease has consumed all retry attempts", "lease_id", r.leaseID) - err = fmt.Errorf("%v: %w", errOutOfRetries.Error(), err) + err = fmt.Errorf("%v: %w", outOfRetriesMessage, err) } le, loadErr := r.m.loadEntry(r.nsCtx, r.leaseID) @@ -528,18 +526,28 @@ func (m *ExpirationManager) invalidate(key string) { return } default: - // Handle lease update + // Update the lease in memory m.updatePendingInternal(le) } default: - // There is no entry in the pending map and the invalidation - // resulted in a nil entry. if le == nil { - // If in the nonexpiring map, remove there. + // There is no entry in the pending map and the invalidation + // resulted in a nil entry. Therefore we should clean up the + // other maps, and update metrics/quotas if appropriate. m.nonexpiring.Delete(leaseID) + + if _, ok := m.irrevocable.Load(leaseID); ok { + m.irrevocable.Delete(leaseID) + + m.leaseCount-- + if err := m.core.quotasHandleLeases(ctx, quotas.LeaseActionDeleted, []string{leaseID}); err != nil { + m.logger.Error("failed to update quota on lease invalidation", "error", err) + return + } + } return } - // Handle lease creation + // Handle lease update (if irrevocable) or creation (if pending) m.updatePendingInternal(le) } } @@ -985,7 +993,7 @@ func (m *ExpirationManager) revokeCommon(ctx context.Context, leaseID string, fo // Clear the expiration handler m.pendingLock.Lock() - m.removeFromPending(ctx, leaseID) + m.removeFromPending(ctx, leaseID, true) m.nonexpiring.Delete(leaseID) m.irrevocable.Delete(leaseID) m.pendingLock.Unlock() @@ -1715,12 +1723,8 @@ func (m *ExpirationManager) updatePending(le *leaseEntry) { // updatePendingInternal is the locked version of updatePending; do not call // this without a write lock on m.pending func (m *ExpirationManager) updatePendingInternal(le *leaseEntry) { - if le.isIrrevocable() { - return - } - // Check for an existing timer - info, ok := m.pending.Load(le.LeaseID) + info, leaseInPending := m.pending.Load(le.LeaseID) var pending pendingInfo if le.ExpireTime.IsZero() { @@ -1735,7 +1739,7 @@ func (m *ExpirationManager) updatePendingInternal(le *leaseEntry) { // if the timer happened to exist, stop the time and delete it from the // pending timers. 
- if ok { + if leaseInPending { info.(pendingInfo).timer.Stop() m.pending.Delete(le.LeaseID) m.leaseCount-- @@ -1749,31 +1753,44 @@ func (m *ExpirationManager) updatePendingInternal(le *leaseEntry) { leaseTotal := le.ExpireTime.Sub(time.Now()) leaseCreated := false - // Create entry if it does not exist or reset if it does - if ok { - pending = info.(pendingInfo) - pending.timer.Reset(leaseTotal) - // No change to lease count in this case + + if le.isIrrevocable() { + // It's possible this function is being called to update the in-memory state + // for a lease from pending to irrevocable (we don't support the opposite). + // If this is the case, we need to know if the lease was previously counted + // so that we can maintain correct metric and quota lease counts. + _, leaseInIrrevocable := m.irrevocable.Load(le.LeaseID) + if !(leaseInPending || leaseInIrrevocable) { + leaseCreated = true + } + + m.removeFromPending(m.quitContext, le.LeaseID, false) + m.irrevocable.Store(le.LeaseID, m.inMemoryLeaseInfo(le)) } else { - leaseID, namespace := le.LeaseID, le.namespace - // Extend the timer by the lease total - timer := time.AfterFunc(leaseTotal, func() { - m.expireFunc(m.quitContext, m, leaseID, namespace) - }) - pending = pendingInfo{ - timer: timer, - } - // new lease - m.leaseCount++ - leaseCreated = true - } + // Create entry if it does not exist or reset if it does + if leaseInPending { + pending = info.(pendingInfo) + pending.timer.Reset(leaseTotal) + // No change to lease count in this case + } else { + leaseID, namespace := le.LeaseID, le.namespace + // Extend the timer by the lease total + timer := time.AfterFunc(leaseTotal, func() { + m.expireFunc(m.quitContext, m, leaseID, namespace) + }) + pending = pendingInfo{ + timer: timer, + } - // Retain some information in-memory - pending.cachedLeaseInfo = m.inMemoryLeaseInfo(le) + leaseCreated = true + } - m.pending.Store(le.LeaseID, pending) + pending.cachedLeaseInfo = m.inMemoryLeaseInfo(le) + m.pending.Store(le.LeaseID, pending) + } if leaseCreated { + m.leaseCount++ if err := m.core.quotasHandleLeases(m.quitContext, quotas.LeaseActionCreated, []string{le.LeaseID}); err != nil { m.logger.Error("failed to update quota on lease creation", "error", err) return @@ -1913,11 +1930,6 @@ func (m *ExpirationManager) loadEntryInternal(ctx context.Context, leaseID strin } le.namespace = ns - if le.isIrrevocable() { - m.irrevocable.Store(le.LeaseID, m.inMemoryLeaseInfo(le)) - return le, nil - } - if restoreMode { if checkRestored { // If we have already loaded this lease, we don't need to update on @@ -2351,19 +2363,24 @@ func (m *ExpirationManager) walkLeases(walkFn leaseWalkFunction) error { } // must be called with m.pendingLock held -func (m *ExpirationManager) removeFromPending(ctx context.Context, leaseID string) { +// set decrementCounters true to decrement the lease count metric and quota +func (m *ExpirationManager) removeFromPending(ctx context.Context, leaseID string, decrementCounters bool) { if info, ok := m.pending.Load(leaseID); ok { pending := info.(pendingInfo) pending.timer.Stop() m.pending.Delete(leaseID) - m.leaseCount-- - // Log but do not fail; unit tests (and maybe Tidy on production systems) - if err := m.core.quotasHandleLeases(ctx, quotas.LeaseActionDeleted, []string{leaseID}); err != nil { - m.logger.Error("failed to update quota on revocation", "error", err) + if decrementCounters { + m.leaseCount-- + // Log but do not fail; unit tests (and maybe Tidy on production systems) + if err := m.core.quotasHandleLeases(ctx, 
quotas.LeaseActionDeleted, []string{leaseID}); err != nil { + m.logger.Error("failed to update quota on revocation", "error", err) + } } } } +// Marks a pending lease as irrevocable. Because the lease is being moved from +// pending to irrevocable, no total lease count metrics/quotas updates are needed // note: must be called with pending lock held func (m *ExpirationManager) markLeaseIrrevocable(ctx context.Context, le *leaseEntry, err error) { if le == nil { @@ -2390,7 +2407,7 @@ func (m *ExpirationManager) markLeaseIrrevocable(ctx context.Context, le *leaseE m.persistEntry(ctx, le) m.irrevocable.Store(le.LeaseID, m.inMemoryLeaseInfo(le)) - m.removeFromPending(ctx, le.LeaseID) + m.removeFromPending(ctx, le.LeaseID, false) m.nonexpiring.Delete(le.LeaseID) } diff --git a/vault/expiration_test.go b/vault/expiration_test.go index 7a6079ece55df..2447332dad770 100644 --- a/vault/expiration_test.go +++ b/vault/expiration_test.go @@ -56,7 +56,7 @@ func TestExpiration_Metrics(t *testing.T) { // Set up a count function to calculate number of leases count := 0 - countFunc := func(leaseID string) { + countFunc := func(_ string) { count++ } @@ -177,7 +177,7 @@ func TestExpiration_Metrics(t *testing.T) { } if !foundLabelOne || !foundLabelTwo || !foundLabelThree { - t.Errorf("One of the labels is missing") + t.Errorf("One of the labels is missing. one: %t, two: %t, three: %t", foundLabelOne, foundLabelTwo, foundLabelThree) } // test the same leases while ignoring namespaces so the 2 different namespaces get aggregated @@ -219,6 +219,99 @@ func TestExpiration_Metrics(t *testing.T) { } } +func TestExpiration_TotalLeaseCount(t *testing.T) { + // Quotas and internal lease count tracker are coupled, so this is a proxy + // for testing the total lease count quota + c, _, _ := TestCoreUnsealed(t) + exp := c.expiration + + expectedCount := 0 + otherNS := &namespace.Namespace{ + ID: "nsid", + Path: "foo/bar", + } + for i := 0; i < 50; i++ { + le := &leaseEntry{ + LeaseID: "lease" + fmt.Sprintf("%d", i), + Path: "foo/bar/" + fmt.Sprintf("%d", i), + namespace: namespace.RootNamespace, + IssueTime: time.Now(), + ExpireTime: time.Now().Add(time.Hour), + } + + otherNSle := &leaseEntry{ + LeaseID: "lease" + fmt.Sprintf("%d", i) + "/blah.nsid", + Path: "foo/bar/" + fmt.Sprintf("%d", i) + "/blah.nsid", + namespace: otherNS, + IssueTime: time.Now(), + ExpireTime: time.Now().Add(time.Hour), + } + + exp.pendingLock.Lock() + if err := exp.persistEntry(namespace.RootContext(nil), le); err != nil { + exp.pendingLock.Unlock() + t.Fatalf("error persisting irrevocable entry: %v", err) + } + exp.updatePendingInternal(le) + expectedCount++ + + if err := exp.persistEntry(namespace.RootContext(nil), otherNSle); err != nil { + exp.pendingLock.Unlock() + t.Fatalf("error persisting irrevocable entry: %v", err) + } + exp.updatePendingInternal(otherNSle) + expectedCount++ + exp.pendingLock.Unlock() + } + + // add some irrevocable leases to each count to ensure they are counted too + // note: irrevocable leases almost certainly have an expire time set in the + // past, but for this exercise it should be fine to set it to whatever + for i := 50; i < 60; i++ { + le := &leaseEntry{ + LeaseID: "lease" + fmt.Sprintf("%d", i+1), + Path: "foo/bar/" + fmt.Sprintf("%d", i+1), + namespace: namespace.RootNamespace, + IssueTime: time.Now(), + ExpireTime: time.Now(), + RevokeErr: "some err message", + } + + otherNSle := &leaseEntry{ + LeaseID: "lease" + fmt.Sprintf("%d", i+1) + "/blah.nsid", + Path: "foo/bar/" + fmt.Sprintf("%d", i+1) + "/blah.nsid", 
+ namespace: otherNS, + IssueTime: time.Now(), + ExpireTime: time.Now(), + RevokeErr: "some err message", + } + + exp.pendingLock.Lock() + if err := exp.persistEntry(namespace.RootContext(nil), le); err != nil { + exp.pendingLock.Unlock() + t.Fatalf("error persisting irrevocable entry: %v", err) + } + exp.updatePendingInternal(le) + expectedCount++ + + if err := exp.persistEntry(namespace.RootContext(nil), otherNSle); err != nil { + exp.pendingLock.Unlock() + t.Fatalf("error persisting irrevocable entry: %v", err) + } + exp.updatePendingInternal(otherNSle) + expectedCount++ + exp.pendingLock.Unlock() + } + + exp.pendingLock.RLock() + count := exp.leaseCount + exp.pendingLock.RUnlock() + + if count != expectedCount { + t.Errorf("bad lease count. expected %d, got %d", expectedCount, count) + } +} + func TestExpiration_Tidy(t *testing.T) { var err error @@ -1145,7 +1238,7 @@ func TestExpiration_RevokeByToken(t *testing.T) { defer noop.Unlock() if len(noop.Requests) != 3 { - t.Fatalf("Bad: %v", noop.Requests) + t.Fatalf("Bad: %#v", noop.Requests) } for _, req := range noop.Requests { if req.Operation != logical.RevokeOperation { @@ -2585,6 +2678,7 @@ func TestExpiration_MarkIrrevocable(t *testing.T) { if err != nil { t.Fatalf("error loading non irrevocable lease after restore: %v", err) } + exp.updatePending(loadedLE) if !loadedLE.isIrrevocable() { t.Fatalf("irrevocable lease is not irrevocable and should be") From 98db3a089fe3b2f89cc445d2280f0f2a9742653a Mon Sep 17 00:00:00 2001 From: Austin Gebauer <34121980+austingebauer@users.noreply.github.com> Date: Tue, 11 May 2021 16:57:12 -0700 Subject: [PATCH 006/101] Update GCP auth docs for signJwt transition to Service Account Credentials API (#11568) --- website/content/api-docs/auth/gcp.mdx | 2 +- .../docs/agent/autoauth/methods/gcp.mdx | 8 ++-- website/content/docs/auth/gcp.mdx | 38 ++++++++++--------- 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/website/content/api-docs/auth/gcp.mdx b/website/content/api-docs/auth/gcp.mdx index 6f5cd717ac654..0b58c8202fcae 100644 --- a/website/content/api-docs/auth/gcp.mdx +++ b/website/content/api-docs/auth/gcp.mdx @@ -468,5 +468,5 @@ $ curl \ [gcp-adc]: https://developers.google.com/identity/protocols/application-default-credentials [jwt]: https://tools.ietf.org/html/rfc7519 -[signjwt-method]: https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt +[signjwt-method]: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/signJwt [instance-token]: https://cloud.google.com/compute/docs/instances/verifying-instance-identity#request_signature diff --git a/website/content/docs/agent/autoauth/methods/gcp.mdx b/website/content/docs/agent/autoauth/methods/gcp.mdx index 929480d965946..0f15a1dfd61e3 100644 --- a/website/content/docs/agent/autoauth/methods/gcp.mdx +++ b/website/content/docs/agent/autoauth/methods/gcp.mdx @@ -13,7 +13,7 @@ authentication types are supported. ## Credentials Vault will use the GCP SDK's normal credential chain behavior. You can set a -static `credentials` value but it is usually not needed. If running on GCE +static `credentials` value, but it is usually not needed. If running on GCE using Application Default Credentials, you may need to specify the service account and project since ADC does not provide metadata used to automatically determine these. @@ -30,8 +30,8 @@ determine these. 
- `service_account` `(string: optional)` - The service account to use, if it cannot be automatically determined -- `project` `(string: optional)` - The project to use, if it cannot be - automatically determined - - `jwt_exp` `(string or int: optional)` - The number of minutes a generated JWT should be valid for when using the `iam` method; defaults to 15 minutes + +-> **Note:** The `project` parameter has been removed in Vault 1.5.9+, 1.6.5+, and 1.7.2+. +It is no longer needed for configuration and will be ignored if provided. diff --git a/website/content/docs/auth/gcp.mdx b/website/content/docs/auth/gcp.mdx index 0b5c6b9a6856a..7f28810aa5852 100644 --- a/website/content/docs/auth/gcp.mdx +++ b/website/content/docs/auth/gcp.mdx @@ -37,13 +37,15 @@ request to Vault. This helper is only available for IAM-type roles. $ vault login -method=gcp \ role="my-role" \ service_account="authenticating-account@my-project.iam.gserviceaccount.com" \ - project="my-project" \ jwt_exp="15m" \ credentials=@path/to/signer/credentials.json ``` For more usage information, run `vault auth help gcp`. +-> **Note:** The `project` parameter has been removed in Vault 1.5.9+, 1.6.5+, and 1.7.2+. +It is no longer needed for configuration and will be ignored if provided. + ### Via the CLI ```shell-session @@ -229,9 +231,9 @@ for IAM service accounts looks like this: [![Vault Google Cloud IAM Login Workflow](/img/vault-gcp-iam-auth-workflow.svg)](/img/vault-gcp-iam-auth-workflow.svg) -1. The client generates a signed JWT using the IAM - [`projects.serviceAccounts.signJwt`][signjwt-method] method. For examples of - how to do this, see the [Generating JWTs](#generating-jwts) section. +1. The client generates a signed JWT using the Service Account Credentials + [`projects.serviceAccounts.signJwt`][signjwt-method] API method. For examples + of how to do this, see the [Generating JWTs](#generating-jwts) section. 2. The client sends this signed JWT to Vault along with a role name. @@ -269,10 +271,10 @@ another cloud provider. This section details the various methods and examples for obtaining JWT tokens. -### IAM +### Service Account Credentials API -This describes how to use the GCP IAM [API method][signjwt-method] directly -to generate the signed JWT with the claims that Vault expects. Note the CLI +This describes how to use the GCP Service Account Credentials [API method][signjwt-method] +directly to generate the signed JWT with the claims that Vault expects. Note the CLI does this process for you and is much easier, and that there is very little reason to do this yourself. @@ -288,33 +290,35 @@ Vault requires the following minimum claim set: } ``` -For the API method, expiration is optional and will default to an hour. -If specified, expiration must be a -[NumericDate](https://tools.ietf.org/html/rfc7519#section-2) value (seconds from -Epoch). This value must be before the max JWT expiration allowed for a role. -This defaults to 15 minutes and cannot be more than 1 hour. +For the API method, providing the expiration claim `exp` is required. If it is omitted, +it will not be added automatically and Vault will deny authentication. Expiration must +be specified as a [NumericDate](https://tools.ietf.org/html/rfc7519#section-2) value +(seconds from Epoch). This value must be before the max JWT expiration allowed for a +role. This defaults to 15 minutes and cannot be more than 1 hour. 
One you have all this information, the JWT token can be signed using curl and [oauth2l](https://github.com/google/oauth2l): ```text ROLE="my-role" -PROJECT="my-project" SERVICE_ACCOUNT="service-account@my-project.iam.gserviceaccount.com" OAUTH_TOKEN="$(oauth2l header cloud-platform)" -JWT_CLAIM="{\\\"aud\\\":\\\"vault/${ROLE}\\\", \\\"sub\\\": \\\"${SERVICE_ACCOUNT}\\\"}" +EXPIRATION="" +JWT_CLAIM="{\\\"aud\\\":\\\"vault/${ROLE}\\\", \\\"sub\\\": \\\"${SERVICE_ACCOUNT}\\\", \\\"exp\\\": ${EXPIRATION}}" curl \ --header "${OAUTH_TOKEN}" \ --header "Content-Type: application/json" \ --request POST \ --data "{\"payload\": \"${JWT_CLAIM}\"}" \ - "https://iam.googleapis.com/v1/projects/${PROJECT}/serviceAccounts/${SERVICE_ACCOUNT}:signJwt" + "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/${SERVICE_ACCOUNT}:signJwt" ``` #### gcloud Example -You can also do this through the (currently beta) gcloud command. +You can also do this through the (currently beta) gcloud command. Note that you will +be required to provide the expiration claim `exp` as a part of the JWT input to the +command. ```shell-session $ gcloud beta iam service-accounts sign-jwt $INPUT_JWT_CLAIMS $OUTPUT_JWT_FILE \ @@ -352,7 +356,7 @@ The GCP Auth Plugin has a full HTTP API. Please see the [API docs][api-docs] for more details. [jwt]: https://tools.ietf.org/html/rfc7519 -[signjwt-method]: https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt +[signjwt-method]: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/signJwt [cloud-creds]: https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application [service-accounts]: https://cloud.google.com/compute/docs/access/service-accounts [api-docs]: /api/auth/gcp From a9c2b720b2de54e95653551a6af1771042513694 Mon Sep 17 00:00:00 2001 From: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com> Date: Wed, 12 May 2021 10:12:33 -0500 Subject: [PATCH 007/101] UI: Regex validation on transform templates (#11586) * Add regex validator component with tests, add to form-field, use in transform template * Update tests with data-test selectors * Add changelog --- changelog/11586.txt | 3 + ui/app/components/regex-validator.hbs | 68 +++++++++++++++++++ ui/app/components/regex-validator.js | 49 +++++++++++++ ui/app/models/transform/template.js | 1 + ui/app/styles/components/regex-validator.scss | 10 +++ ui/app/styles/core.scss | 1 + .../addon/templates/components/form-field.hbs | 13 ++++ .../components/regex-validator-test.js | 60 ++++++++++++++++ 8 files changed, 205 insertions(+) create mode 100644 changelog/11586.txt create mode 100644 ui/app/components/regex-validator.hbs create mode 100644 ui/app/components/regex-validator.js create mode 100644 ui/app/styles/components/regex-validator.scss create mode 100644 ui/tests/integration/components/regex-validator-test.js diff --git a/changelog/11586.txt b/changelog/11586.txt new file mode 100644 index 0000000000000..31c40692ca726 --- /dev/null +++ b/changelog/11586.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add regex validation to Transform Template pattern input +``` diff --git a/ui/app/components/regex-validator.hbs b/ui/app/components/regex-validator.hbs new file mode 100644 index 0000000000000..18c5cf64fbebd --- /dev/null +++ b/ui/app/components/regex-validator.hbs @@ -0,0 +1,68 @@ +
+
+
+ + {{#if @attr.options.subText}} +

{{@attr.options.subText}} {{#if @attr.options.docLink}}See our documentation for help.{{/if}}

+ {{/if}} +
+
+ + Validation + +
+
+ +
+{{#if this.showTestValue}} +
+ + + + {{#if (and this.testValue @value)}} +
+ {{#if this.regexError}} + + {{else}} +
+
+ {{/if}} +
+ {{/if}} +
+{{/if}} + diff --git a/ui/app/components/regex-validator.js b/ui/app/components/regex-validator.js new file mode 100644 index 0000000000000..f71539435afe0 --- /dev/null +++ b/ui/app/components/regex-validator.js @@ -0,0 +1,49 @@ +/** + * @module RegexValidator + * RegexValidator components are used to provide input forms for regex values, along with a toggle-able validation input which does not get saved to the model. + * + * @example + * ```js + * const attrExample = { + * name: 'valName', + * options: { + * helpText: 'Shows in tooltip', + * subText: 'Shows underneath label', + * docLink: 'Adds docs link to subText if present', + * defaultValue: 'hello', // Shows if no value on model + * } + * } + * + * ``` + * @param {func} onChange - the action that should trigger when the main input is changed. + * @param {string} value - the value of the main input which will be updated in onChange + * @param {string} labelString - Form label. Anticipated from form-field + * @param {object} attr - attribute from model. Anticipated from form-field. Example of attribute shape above + */ + +import Component from '@glimmer/component'; +import { tracked } from '@glimmer/tracking'; +import { action } from '@ember/object'; + +export default class RegexValidator extends Component { + @tracked testValue = ''; + @tracked showTestValue = false; + + get regexError() { + const testString = this.testValue; + if (!testString || !this.args.value) return false; + const regex = new RegExp(this.args.value, 'g'); + const matchArray = testString.toString().match(regex); + return testString !== matchArray?.join(''); + } + + @action + updateTestValue(evt) { + this.testValue = evt.target.value; + } + + @action + toggleTestValue() { + this.showTestValue = !this.showTestValue; + } +} diff --git a/ui/app/models/transform/template.js b/ui/app/models/transform/template.js index 4d8602de8a75b..103589e87a81a 100644 --- a/ui/app/models/transform/template.js +++ b/ui/app/models/transform/template.js @@ -19,6 +19,7 @@ const M = Model.extend({ }), type: attr('string', { defaultValue: 'regex' }), pattern: attr('string', { + editType: 'regex', subText: 'The template’s pattern defines the data format. 
Expressed in regex.', }), alphabet: attr('array', { diff --git a/ui/app/styles/components/regex-validator.scss b/ui/app/styles/components/regex-validator.scss new file mode 100644 index 0000000000000..72fb255c79308 --- /dev/null +++ b/ui/app/styles/components/regex-validator.scss @@ -0,0 +1,10 @@ +.regex-label-wrapper { + display: flex; + align-items: flex-end; +} +.regex-label { + flex: 1 0 auto; +} +.regex-toggle { + flex: 0 1 auto; +} diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss index 3f51210fcd2ce..f0b4fb8983504 100644 --- a/ui/app/styles/core.scss +++ b/ui/app/styles/core.scss @@ -80,6 +80,7 @@ @import './components/radio-card'; @import './components/radial-progress'; @import './components/raft-join'; +@import './components/regex-validator'; @import './components/replication-dashboard'; @import './components/replication-doc-link'; @import './components/replication-header'; diff --git a/ui/lib/core/addon/templates/components/form-field.hbs b/ui/lib/core/addon/templates/components/form-field.hbs index 2d05c7ae3dc1f..ac2118aa4b8db 100644 --- a/ui/lib/core/addon/templates/components/form-field.hbs +++ b/ui/lib/core/addon/templates/components/form-field.hbs @@ -13,6 +13,7 @@ "ttl" "stringArray" "json" + "regex" ) ) ) @@ -129,6 +130,18 @@ @initialValue={{or (get model valuePath) attr.options.setDefault}} /> +{{else if (eq attr.options.editType "regex")}} + {{!-- Regex Validated Input --}} + {{else if (eq attr.options.editType "optionalText")}} {{!-- Togglable Text Input --}} ` + ); + assert.dom('.regex-label label').hasText('Regex Example', 'Label is correct'); + assert.dom('[data-test-toggle-input="example-validation-toggle"]').exists('Validation toggle exists'); + assert.dom('[data-test-regex-validator-test-string]').doesNotExist('Test string input does not show'); + + await click('[data-test-toggle-input="example-validation-toggle"]'); + assert.dom('[data-test-regex-validator-test-string]').exists('Test string input shows after toggle'); + assert + .dom('[data-test-regex-validation-message]') + .doesNotExist('Validation message does not show if test string is empty'); + + await fillIn('[data-test-input="example-testval"]', '123a'); + assert.dom('[data-test-regex-validation-message]').exists('Validation message shows after input filled'); + assert + .dom('[data-test-inline-error-message]') + .hasText("Your regex doesn't match the subject string", 'Shows error when regex does not match string'); + + await fillIn('[data-test-input="example-testval"]', '1234'); + assert + .dom('[data-test-inline-success-message]') + .hasText('Your regex matches the subject string', 'Shows success when regex matches'); + + await fillIn('[data-test-input="example-testval"]', '12345'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + "Your regex doesn't match the subject string", + "Shows error if regex doesn't match complete string" + ); + await fillIn('[data-test-input="example"]', '(\\d{5})'); + assert.ok(spy.calledOnce, 'Calls the passed onChange function when main input is changed'); + }); +}); From f12f24b5b6b3ff0d933d59fe3c457f9389f16de7 Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Wed, 12 May 2021 10:47:38 -0500 Subject: [PATCH 008/101] Add an exponential backoff to TCP listeners to avoid fast loops in error scenarios (#11588) * Add an exponential backoff to TCP listeners to avoid fast loops in error scenarios * reset loop delay * changelog --- changelog/11588.txt | 3 +++ vault/cluster/cluster.go | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+) 
create mode 100644 changelog/11588.txt diff --git a/changelog/11588.txt b/changelog/11588.txt new file mode 100644 index 0000000000000..1f7f0365abfa5 --- /dev/null +++ b/changelog/11588.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. +``` \ No newline at end of file diff --git a/vault/cluster/cluster.go b/vault/cluster/cluster.go index b4ec28cf6f4cb..016e73c32abd5 100644 --- a/vault/cluster/cluster.go +++ b/vault/cluster/cluster.go @@ -279,6 +279,15 @@ func (cl *Listener) Run(ctx context.Context) error { close(closeCh) }() + // baseDelay is the initial delay after an Accept() error before attempting again + const baseDelay = 5 * time.Millisecond + + // maxDelay is the maximum delay after an Accept() error before attempting again. + // In the case that this function is error-looping, it will delay the shutdown check. + // Therefore, changes to maxDelay may have an effect on the latency of shutdown. + const maxDelay = 1 * time.Second + + var loopDelay time.Duration for { if atomic.LoadUint32(cl.shutdown) > 0 { return @@ -298,8 +307,23 @@ func (cl *Listener) Run(ctx context.Context) error { if conn != nil { conn.Close() } + + if loopDelay == 0 { + loopDelay = baseDelay + } else { + loopDelay *= 2 + } + + if loopDelay > maxDelay { + loopDelay = maxDelay + } + + time.Sleep(loopDelay) continue } + // No error, reset loop delay + loopDelay = 0 + if conn == nil { continue } From 9dad633adf835ff941f352195d0c5bf94b7c6de8 Mon Sep 17 00:00:00 2001 From: Angel Garbarino Date: Wed, 12 May 2021 11:20:47 -0600 Subject: [PATCH 009/101] skip flaky control group test (#11595) --- ui/tests/acceptance/enterprise-control-groups-test.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/tests/acceptance/enterprise-control-groups-test.js b/ui/tests/acceptance/enterprise-control-groups-test.js index b5aa368f9ca61..373c6f057df51 100644 --- a/ui/tests/acceptance/enterprise-control-groups-test.js +++ b/ui/tests/acceptance/enterprise-control-groups-test.js @@ -1,5 +1,5 @@ import { settled, currentURL, currentRouteName, visit } from '@ember/test-helpers'; -import { module, test } from 'qunit'; +import { module, test, skip } from 'qunit'; import { setupApplicationTest } from 'ember-qunit'; import { create } from 'ember-cli-page-object'; @@ -200,12 +200,12 @@ module('Acceptance | Enterprise | control groups', function(hooks) { } }; - test('it allows the full flow to work without a saved token', async function(assert) { + skip('it allows the full flow to work without a saved token', async function(assert) { await workflow(assert, this); await settled(); }); - test('it allows the full flow to work with a saved token', async function(assert) { + skip('it allows the full flow to work with a saved token', async function(assert) { await workflow(assert, this, true); await settled(); }); From 08de9ad97898270da018d0aa65cf2743290b7a55 Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Wed, 12 May 2021 12:54:40 -0500 Subject: [PATCH 010/101] Add infrastructure and helpers for skipping diagnose checks (#11593) * Add infrastructure for skipping tests * Add infrastructure for skipping tests * Set it * Update vault/diagnose/helpers.go Co-authored-by: swayne275 * Implement type alias for test functions Co-authored-by: swayne275 --- command/operator_diagnose.go | 1 + vault/diagnose/helpers.go | 60 ++++++++++++++++++++++++++++++---- vault/diagnose/helpers_test.go | 11 +++++++ vault/diagnose/output.go | 14 ++++++-- 4 files changed, 
77 insertions(+), 9 deletions(-) diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index 8b8511c4dffb2..a395c7706d8e6 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -133,6 +133,7 @@ func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int { c.UI.Output(version.GetVersion().FullVersionNumber(true)) ctx := diagnose.Context(context.Background(), c.diagnose) err := c.offlineDiagnostics(ctx) + c.diagnose.SetSkipList(c.flagSkips) if err != nil { return 1 diff --git a/vault/diagnose/helpers.go b/vault/diagnose/helpers.go index 599bda3099ed7..ad41b421c00a9 100644 --- a/vault/diagnose/helpers.go +++ b/vault/diagnose/helpers.go @@ -13,6 +13,7 @@ import ( const ( warningEventName = "warning" + skippedEventName = "skipped" actionKey = "actionKey" spotCheckOkEventName = "spot-check-ok" spotCheckWarnEventName = "spot-check-warn" @@ -25,10 +26,13 @@ const ( var diagnoseSession = struct{}{} var noopTracer = trace.NewNoopTracerProvider().Tracer("vault-diagnose") +type testFunction func(context.Context) error + type Session struct { tc *TelemetryCollector tracer trace.Tracer tp *sdktrace.TracerProvider + skip map[string]bool } // New initializes a Diagnose tracing session. In particular this wires a TelemetryCollector, which @@ -47,15 +51,39 @@ func New() *Session { tp: tp, tc: tc, tracer: tracer, + skip: make(map[string]bool), } return sess } +func (s *Session) SetSkipList(ls []string) { + for _, e := range ls { + s.skip[e] = true + } +} + +// IsSkipped returns true if skipName is present in the skip list. Can be used in combination with Skip to mark a +// span skipped and conditionally skip some logic. +func (s *Session) IsSkipped(skipName string) bool { + return s.skip[skipName] +} + // Context returns a new context with a defined diagnose session func Context(ctx context.Context, sess *Session) context.Context { return context.WithValue(ctx, diagnoseSession, sess) } +// CurrentSession retrieves the active diagnose session from the context, or nil if none. +func CurrentSession(ctx context.Context) *Session { + sessionCtxVal := ctx.Value(diagnoseSession) + if sessionCtxVal != nil { + + return sessionCtxVal.(*Session) + + } + return nil +} + // Finalize ends the Diagnose session, returning the root of the result tree. This will be empty until // the outermost span ends. func (s *Session) Finalize(ctx context.Context) *Result { @@ -65,10 +93,8 @@ func (s *Session) Finalize(ctx context.Context) *Result { // StartSpan starts a "diagnose" span, which is really just an OpenTelemetry Tracing span. func StartSpan(ctx context.Context, spanName string, options ...trace.SpanOption) (context.Context, trace.Span) { - sessionCtxVal := ctx.Value(diagnoseSession) - if sessionCtxVal != nil { - - session := sessionCtxVal.(*Session) + session := CurrentSession(ctx) + if session != nil { return session.tracer.Start(ctx, spanName, options...) } else { return noopTracer.Start(ctx, spanName, options...) @@ -88,6 +114,12 @@ func Error(ctx context.Context, err error, options ...trace.EventOption) error { return err } +// Skipped marks the current span skipped +func Skipped(ctx context.Context) { + span := trace.SpanFromContext(ctx) + span.AddEvent(skippedEventName) +} + // Warn records a warning on the current span func Warn(ctx context.Context, msg string) { span := trace.SpanFromContext(ctx) @@ -139,7 +171,7 @@ func SpotCheck(ctx context.Context, checkName string, f func() error) error { // Test creates a new named span, and executes the provided function within it. 
If the function returns an error, // the span is considered to have failed. -func Test(ctx context.Context, spanName string, function func(context.Context) error, options ...trace.SpanOption) error { +func Test(ctx context.Context, spanName string, function testFunction, options ...trace.SpanOption) error { ctx, span := StartSpan(ctx, spanName, options...) defer span.End() @@ -154,7 +186,7 @@ func Test(ctx context.Context, spanName string, function func(context.Context) e // complete within the timeout, e.g. // // diagnose.Test(ctx, "my-span", diagnose.WithTimeout(5 * time.Second, myTestFunc)) -func WithTimeout(d time.Duration, f func(context.Context) error) func(ctx context.Context) error { +func WithTimeout(d time.Duration, f testFunction) testFunction { return func(ctx context.Context) error { rch := make(chan error) t := time.NewTimer(d) @@ -168,3 +200,19 @@ func WithTimeout(d time.Duration, f func(context.Context) error) func(ctx contex } } } + +// Skippable wraps a Test function with logic that will not run the test if the skipName +// was in the session's skip list +func Skippable(skipName string, f testFunction) testFunction { + return func(ctx context.Context) error { + session := CurrentSession(ctx) + if session != nil { + if !session.IsSkipped(skipName) { + return f(ctx) + } else { + Skipped(ctx) + } + } + return nil + } +} diff --git a/vault/diagnose/helpers_test.go b/vault/diagnose/helpers_test.go index ffa45e6dd716a..ebe92f59797ee 100644 --- a/vault/diagnose/helpers_test.go +++ b/vault/diagnose/helpers_test.go @@ -31,9 +31,14 @@ func TestDiagnoseOtelResults(t *testing.T) { Status: ErrorStatus, Message: "no scones", }, + { + Name: "dispose-grounds", + Status: SkippedStatus, + }, }, } sess := New() + sess.SetSkipList([]string{"dispose-grounds"}) ctx := Context(context.Background(), sess) func() { @@ -70,6 +75,7 @@ func makeCoffee(ctx context.Context) error { SpotCheck(ctx, "pick-scone", pickScone) + Test(ctx, "dispose-grounds", Skippable("dispose-grounds", disposeGrounds)) return nil } @@ -89,3 +95,8 @@ func brewCoffee(ctx context.Context) error { func pickScone() error { return errors.New("no scones") } + +func disposeGrounds(_ context.Context) error { + //Done! 
+ return nil +} diff --git a/vault/diagnose/output.go b/vault/diagnose/output.go index 3b281d4e87c66..2fdad10ff2526 100644 --- a/vault/diagnose/output.go +++ b/vault/diagnose/output.go @@ -17,12 +17,14 @@ import ( const ( status_unknown = "[ ] " status_ok = "\u001b[32m[ ok ]\u001b[0m " - status_failed = "\u001b[31m[failed]\u001b[0m " + status_failed = "\u001b[31m[ fail ]\u001b[0m " status_warn = "\u001b[33m[ warn ]\u001b[0m " + status_skipped = "\u001b[90m[ skip ]\u001b[0m " same_line = "\u001b[F" ErrorStatus = "error" WarningStatus = "warn" OkStatus = "ok" + SkippedStatus = "skipped" ) var errUnimplemented = errors.New("unimplemented") @@ -133,6 +135,8 @@ func (t *TelemetryCollector) getOrBuildResult(id trace.SpanID) *Result { r.Warnings = append(r.Warnings, a.Value.AsString()) } } + case skippedEventName: + r.Status = SkippedStatus case ErrorStatus: var message string var action string @@ -218,11 +222,13 @@ func (t *TelemetryCollector) getOrBuildResult(id trace.SpanID) *Result { case codes.Unset: if len(r.Warnings) > 0 { r.Status = WarningStatus - } else { + } else if r.Status != SkippedStatus { r.Status = OkStatus } case codes.Ok: - r.Status = OkStatus + if r.Status != SkippedStatus { + r.Status = OkStatus + } case codes.Error: r.Status = ErrorStatus } @@ -251,6 +257,8 @@ func (r *Result) write(sb *strings.Builder, depth int) { sb.WriteString(status_warn) case ErrorStatus: sb.WriteString(status_failed) + case SkippedStatus: + sb.WriteString(status_skipped) } sb.WriteString(r.Name) From f1c0a7744fb0196fddb8e696f9058be57684bed4 Mon Sep 17 00:00:00 2001 From: Josh Black Date: Wed, 12 May 2021 12:19:25 -0700 Subject: [PATCH 011/101] Provide a new API endpoint for retrieving signed licenses (#11543) --- command/license_get.go | 10 ++++++-- vault/logical_system_helpers.go | 12 --------- website/content/api-docs/system/license.mdx | 28 ++++++++++++++++++++- 3 files changed, 35 insertions(+), 15 deletions(-) diff --git a/command/license_get.go b/command/license_get.go index 147c751964942..ffda9ec52b085 100644 --- a/command/license_get.go +++ b/command/license_get.go @@ -2,7 +2,6 @@ package command import ( "fmt" - "strconv" "strings" "github.com/mitchellh/cli" @@ -82,7 +81,14 @@ func (c *LicenseGetCommand) Run(args []string) int { return 2 } - secret, err := client.Logical().ReadWithData("sys/license", map[string][]string{"signed": {strconv.FormatBool(c.signed)}}) + var path string + if c.signed { + path = "sys/license/signed" + } else { + path = "sys/license" + } + + secret, err := client.Logical().Read(path) if err != nil { c.UI.Error(fmt.Sprintf("Error retrieving license: %s", err)) return 2 diff --git a/vault/logical_system_helpers.go b/vault/logical_system_helpers.go index d2e27eba7e6e3..0c5016751a867 100644 --- a/vault/logical_system_helpers.go +++ b/vault/logical_system_helpers.go @@ -55,18 +55,6 @@ var ( } } - pathLicenseRead = func(b *SystemBackend) framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - return nil, nil - } - } - - pathLicenseUpdate = func(b *SystemBackend) framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - return nil, nil - } - } - entPaths = func(b *SystemBackend) []*framework.Path { return []*framework.Path{ { diff --git a/website/content/api-docs/system/license.mdx b/website/content/api-docs/system/license.mdx index fc5f7a0bf855e..bd31a6c624920 100644 --- 
a/website/content/api-docs/system/license.mdx +++ b/website/content/api-docs/system/license.mdx @@ -2,7 +2,7 @@ layout: api page_title: /sys/license - HTTP API description: |- - The `/sys/license` endpoint is used to view and update the license used in + The `/sys/license` endpoint is used to view and update the license used in Vault. --- @@ -43,6 +43,32 @@ $ curl \ } ``` +## Read Signed License + +This endpoint returns the signed license blob for the currently installed license. + +| Method | Path | +| :----- | :------------- | +| `GET` | `/sys/license/signed` | + +### Sample Request + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + http://127.0.0.1:8200/v1/sys/license/signed +``` + +### Sample Response + +```json +{ + "data": { + "signed": "01ABCDEFG..." + } +} +``` + ## Install License This endpoint is used to install a license into Vault. From c180ba706c541c7ce8c096e68d284cff238e6af7 Mon Sep 17 00:00:00 2001 From: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com> Date: Wed, 12 May 2021 14:33:35 -0500 Subject: [PATCH 012/101] Fix: link on database role item goes to correct URL (#11597) * Fix: link on database role item goes to correct URL * Add changelog --- changelog/11597.txt | 3 +++ ui/app/templates/components/database-list-item.hbs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog/11597.txt diff --git a/changelog/11597.txt b/changelog/11597.txt new file mode 100644 index 0000000000000..4a9c113d71ae6 --- /dev/null +++ b/changelog/11597.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix text link URL on database roles list +``` \ No newline at end of file diff --git a/ui/app/templates/components/database-list-item.hbs b/ui/app/templates/components/database-list-item.hbs index 13dd33e9abc61..2badff9aae893 100644 --- a/ui/app/templates/components/database-list-item.hbs +++ b/ui/app/templates/components/database-list-item.hbs @@ -8,7 +8,7 @@ }}
- + Date: Wed, 12 May 2021 15:22:02 -0600 Subject: [PATCH 013/101] Update MongoDB tests to not fail in Go 1.16 (#11533) --- .circleci/config.yml | 10 +- .circleci/config/commands/go_test.yml | 2 +- go.mod | 1 + plugins/database/mongodb/mongodb_test.go | 29 ++- .../google/go-cmp/cmp/cmpopts/equate.go | 148 +++++++++++++ .../google/go-cmp/cmp/cmpopts/errors_go113.go | 15 ++ .../go-cmp/cmp/cmpopts/errors_xerrors.go | 18 ++ .../google/go-cmp/cmp/cmpopts/ignore.go | 206 ++++++++++++++++++ .../google/go-cmp/cmp/cmpopts/sort.go | 147 +++++++++++++ .../go-cmp/cmp/cmpopts/struct_filter.go | 187 ++++++++++++++++ .../google/go-cmp/cmp/cmpopts/xform.go | 35 +++ vendor/modules.txt | 2 + 12 files changed, 791 insertions(+), 9 deletions(-) create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go diff --git a/.circleci/config.yml b/.circleci/config.yml index faafe343e4643..e459a5eb1ac52 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -952,7 +952,7 @@ jobs: -e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ --network vaulttest --name \ - testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \ + testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \ tail -f /dev/null # Run tests @@ -1167,7 +1167,7 @@ jobs: -e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ --network vaulttest --name \ - testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \ + testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \ tail -f /dev/null # Run tests @@ -1633,7 +1633,7 @@ jobs: -e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ --network vaulttest --name \ - testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \ + testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \ tail -f /dev/null # Run tests @@ -1779,7 +1779,7 @@ jobs: -e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ --network vaulttest --name \ - testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \ + testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \ tail -f /dev/null # Run tests @@ -2376,7 +2376,7 @@ jobs: -e DOCKER_CERT_PATH -e DOCKER_HOST -e DOCKER_MACHINE_NAME -e DOCKER_TLS_VERIFY -e NO_PROXY \ -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ --network vaulttest --name \ - testcontainer docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster \ + testcontainer docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster \ tail -f /dev/null # Run tests diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml index b8146bd3396e5..27a2951cdf648 100644 --- a/.circleci/config/commands/go_test.yml +++ b/.circleci/config/commands/go_test.yml @@ -14,7 
+14,7 @@ parameters: default: false go_image: type: string - default: "docker.mirror.hashicorp.services/circleci/golang:1.15.11-buster" + default: "docker.mirror.hashicorp.services/circleci/golang:1.16.2-buster" use_docker: type: boolean default: false diff --git a/go.mod b/go.mod index 09de5df8c03d7..555a6ffcc9041 100644 --- a/go.mod +++ b/go.mod @@ -52,6 +52,7 @@ require ( github.com/go-test/deep v1.0.7 github.com/gocql/gocql v0.0.0-20210401103645-80ab1e13e309 github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.5.5 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 github.com/hashicorp/consul-template v0.25.2 diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go index cb088892487de..832b0ce1f7d16 100644 --- a/plugins/database/mongodb/mongodb_test.go +++ b/plugins/database/mongodb/mongodb_test.go @@ -7,9 +7,13 @@ import ( "fmt" "reflect" "strings" + "sync" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" "github.com/hashicorp/vault/helper/testhelpers/mongodb" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -346,9 +350,7 @@ func TestGetTLSAuth(t *testing.T) { if !test.expectErr && err != nil { t.Fatalf("no error expected, got: %s", err) } - if !reflect.DeepEqual(actual, test.expectOpts) { - t.Fatalf("Actual:\n%#v\nExpected:\n%#v", actual, test.expectOpts) - } + assertDeepEqual(t, test.expectOpts, actual) }) } } @@ -363,6 +365,27 @@ func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.Cer return pool } +var cmpClientOptionsOpts = cmp.Options{ + cmp.AllowUnexported(options.ClientOptions{}), + + cmp.AllowUnexported(tls.Config{}), + cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}), + + // 'lazyCerts' has a func field which can't be compared. + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} + +// Need a special comparison for ClientOptions because reflect.DeepEquals won't work in Go 1.16. +// See: https://github.com/golang/go/issues/45891 +func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) { + t.Helper() + + if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} + func createDBUser(t testing.TB, connURL, db, username, password string) { t.Helper() diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go new file mode 100644 index 0000000000000..e4ffca838a17a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go @@ -0,0 +1,148 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmpopts provides common options for the cmp package. +package cmpopts + +import ( + "math" + "reflect" + "time" + + "github.com/google/go-cmp/cmp" +) + +func equateAlways(_, _ interface{}) bool { return true } + +// EquateEmpty returns a Comparer option that determines all maps and slices +// with a length of zero to be equal, regardless of whether they are nil. +// +// EquateEmpty can be used in conjunction with SortSlices and SortMaps. 
+func EquateEmpty() cmp.Option { + return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) +} + +func isEmpty(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + return (x != nil && y != nil && vx.Type() == vy.Type()) && + (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && + (vx.Len() == 0 && vy.Len() == 0) +} + +// EquateApprox returns a Comparer option that determines float32 or float64 +// values to be equal if they are within a relative fraction or absolute margin. +// This option is not used when either x or y is NaN or infinite. +// +// The fraction determines that the difference of two values must be within the +// smaller fraction of the two values, while the margin determines that the two +// values must be within some absolute margin. +// To express only a fraction or only a margin, use 0 for the other parameter. +// The fraction and margin must be non-negative. +// +// The mathematical expression used is equivalent to: +// |x-y| ≤ max(fraction*min(|x|, |y|), margin) +// +// EquateApprox can be used in conjunction with EquateNaNs. +func EquateApprox(fraction, margin float64) cmp.Option { + if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { + panic("margin or fraction must be a non-negative number") + } + a := approximator{fraction, margin} + return cmp.Options{ + cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), + cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), + } +} + +type approximator struct{ frac, marg float64 } + +func areRealF64s(x, y float64) bool { + return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) +} +func areRealF32s(x, y float32) bool { + return areRealF64s(float64(x), float64(y)) +} +func (a approximator) compareF64(x, y float64) bool { + relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) + return math.Abs(x-y) <= math.Max(a.marg, relMarg) +} +func (a approximator) compareF32(x, y float32) bool { + return a.compareF64(float64(x), float64(y)) +} + +// EquateNaNs returns a Comparer option that determines float32 and float64 +// NaN values to be equal. +// +// EquateNaNs can be used in conjunction with EquateApprox. +func EquateNaNs() cmp.Option { + return cmp.Options{ + cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), + cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), + } +} + +func areNaNsF64s(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) +} +func areNaNsF32s(x, y float32) bool { + return areNaNsF64s(float64(x), float64(y)) +} + +// EquateApproxTime returns a Comparer option that determines two non-zero +// time.Time values to be equal if they are within some margin of one another. +// If both times have a monotonic clock reading, then the monotonic time +// difference will be used. The margin must be non-negative. +func EquateApproxTime(margin time.Duration) cmp.Option { + if margin < 0 { + panic("margin must be a non-negative number") + } + a := timeApproximator{margin} + return cmp.FilterValues(areNonZeroTimes, cmp.Comparer(a.compare)) +} + +func areNonZeroTimes(x, y time.Time) bool { + return !x.IsZero() && !y.IsZero() +} + +type timeApproximator struct { + margin time.Duration +} + +func (a timeApproximator) compare(x, y time.Time) bool { + // Avoid subtracting times to avoid overflow when the + // difference is larger than the largest representible duration. + if x.After(y) { + // Ensure x is always before y + x, y = y, x + } + // We're within the margin if x+margin >= y. 
+ // Note: time.Time doesn't have AfterOrEqual method hence the negation. + return !x.Add(a.margin).Before(y) +} + +// AnyError is an error that matches any non-nil error. +var AnyError anyError + +type anyError struct{} + +func (anyError) Error() string { return "any error" } +func (anyError) Is(err error) bool { return err != nil } + +// EquateErrors returns a Comparer option that determines errors to be equal +// if errors.Is reports them to match. The AnyError error can be used to +// match any non-nil error. +func EquateErrors() cmp.Option { + return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors)) +} + +// areConcreteErrors reports whether x and y are types that implement error. +// The input types are deliberately of the interface{} type rather than the +// error type so that we can handle situations where the current type is an +// interface{}, but the underlying concrete types both happen to implement +// the error interface. +func areConcreteErrors(x, y interface{}) bool { + _, ok1 := x.(error) + _, ok2 := y.(error) + return ok1 && ok2 +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go new file mode 100644 index 0000000000000..26fe25d6afbcb --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go @@ -0,0 +1,15 @@ +// Copyright 2021, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package cmpopts + +import "errors" + +func compareErrors(x, y interface{}) bool { + xe := x.(error) + ye := y.(error) + return errors.Is(xe, ye) || errors.Is(ye, xe) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go new file mode 100644 index 0000000000000..6eeb8d6e6543e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go @@ -0,0 +1,18 @@ +// Copyright 2021, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +// TODO(≥go1.13): For support on = 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { + panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) + } + start = -1 + } else if start == -1 { + start = i + } + } +} +func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + vx, vy := v.Index(i), v.Index(j) + return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() +} + +// SortMaps returns a Transformer option that flattens map[K]V types to be a +// sorted []struct{K, V}. The less function must be of the form +// "func(T, T) bool" which is used to sort any map with key K that is +// assignable to T. +// +// Flattening the map into a slice has the property that cmp.Equal is able to +// use Comparers on K or the K.Equal method if it exists. +// +// The less function must be: +// • Deterministic: less(x, y) == less(x, y) +// • Irreflexive: !less(x, x) +// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// • Total: if x != y, then either less(x, y) or less(y, x) +// +// SortMaps can be used in conjunction with EquateEmpty. 
+func SortMaps(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { + panic(fmt.Sprintf("invalid less function: %T", lessFunc)) + } + ms := mapSorter{vf.Type().In(0), vf} + return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) +} + +type mapSorter struct { + in reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (ms mapSorter) filter(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + return (x != nil && y != nil && vx.Type() == vy.Type()) && + (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && + (vx.Len() != 0 || vy.Len() != 0) +} +func (ms mapSorter) sort(x interface{}) interface{} { + src := reflect.ValueOf(x) + outType := reflect.StructOf([]reflect.StructField{ + {Name: "K", Type: src.Type().Key()}, + {Name: "V", Type: src.Type().Elem()}, + }) + dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) + for i, k := range src.MapKeys() { + v := reflect.New(outType).Elem() + v.Field(0).Set(k) + v.Field(1).Set(src.MapIndex(k)) + dst.Index(i).Set(v) + } + sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) + ms.checkSort(dst) + return dst.Interface() +} +func (ms mapSorter) checkSort(v reflect.Value) { + for i := 1; i < v.Len(); i++ { + if !ms.less(v, i-1, i) { + panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) + } + } +} +func (ms mapSorter) less(v reflect.Value, i, j int) bool { + vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) + return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go new file mode 100644 index 0000000000000..a09829c3af920 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go @@ -0,0 +1,187 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmpopts + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" +) + +// filterField returns a new Option where opt is only evaluated on paths that +// include a specific exported field on a single struct type. +// The struct type is specified by passing in a value of that type. +// +// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a +// specific sub-field that is embedded or nested within the parent struct. +func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { + // TODO: This is currently unexported over concerns of how helper filters + // can be composed together easily. + // TODO: Add tests for FilterField. + + sf := newStructFilter(typ, name) + return cmp.FilterPath(sf.filter, opt) +} + +type structFilter struct { + t reflect.Type // The root struct type to match on + ft fieldTree // Tree of fields to match on +} + +func newStructFilter(typ interface{}, names ...string) structFilter { + // TODO: Perhaps allow * as a special identifier to allow ignoring any + // number of path steps until the next field match? + // This could be useful when a concrete struct gets transformed into + // an anonymous struct where it is not possible to specify that by type, + // but the transformer happens to provide guarantees about the names of + // the transformed fields. 
+ + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) + } + var ft fieldTree + for _, name := range names { + cname, err := canonicalName(t, name) + if err != nil { + panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) + } + ft.insert(cname) + } + return structFilter{t, ft} +} + +func (sf structFilter) filter(p cmp.Path) bool { + for i, ps := range p { + if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { + return true + } + } + return false +} + +// fieldTree represents a set of dot-separated identifiers. +// +// For example, inserting the following selectors: +// Foo +// Foo.Bar.Baz +// Foo.Buzz +// Nuka.Cola.Quantum +// +// Results in a tree of the form: +// {sub: { +// "Foo": {ok: true, sub: { +// "Bar": {sub: { +// "Baz": {ok: true}, +// }}, +// "Buzz": {ok: true}, +// }}, +// "Nuka": {sub: { +// "Cola": {sub: { +// "Quantum": {ok: true}, +// }}, +// }}, +// }} +type fieldTree struct { + ok bool // Whether this is a specified node + sub map[string]fieldTree // The sub-tree of fields under this node +} + +// insert inserts a sequence of field accesses into the tree. +func (ft *fieldTree) insert(cname []string) { + if ft.sub == nil { + ft.sub = make(map[string]fieldTree) + } + if len(cname) == 0 { + ft.ok = true + return + } + sub := ft.sub[cname[0]] + sub.insert(cname[1:]) + ft.sub[cname[0]] = sub +} + +// matchPrefix reports whether any selector in the fieldTree matches +// the start of path p. +func (ft fieldTree) matchPrefix(p cmp.Path) bool { + for _, ps := range p { + switch ps := ps.(type) { + case cmp.StructField: + ft = ft.sub[ps.Name()] + if ft.ok { + return true + } + if len(ft.sub) == 0 { + return false + } + case cmp.Indirect: + default: + return false + } + } + return false +} + +// canonicalName returns a list of identifiers where any struct field access +// through an embedded field is expanded to include the names of the embedded +// types themselves. +// +// For example, suppose field "Foo" is not directly in the parent struct, +// but actually from an embedded struct of type "Bar". Then, the canonical name +// of "Foo" is actually "Bar.Foo". +// +// Suppose field "Foo" is not directly in the parent struct, but actually +// a field in two different embedded structs of types "Bar" and "Baz". +// Then the selector "Foo" causes a panic since it is ambiguous which one it +// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". +func canonicalName(t reflect.Type, sel string) ([]string, error) { + var name string + sel = strings.TrimPrefix(sel, ".") + if sel == "" { + return nil, fmt.Errorf("name must not be empty") + } + if i := strings.IndexByte(sel, '.'); i < 0 { + name, sel = sel, "" + } else { + name, sel = sel[:i], sel[i:] + } + + // Type must be a struct or pointer to struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("%v must be a struct", t) + } + + // Find the canonical name for this current field name. + // If the field exists in an embedded struct, then it will be expanded. + sf, _ := t.FieldByName(name) + if !isExported(name) { + // Avoid using reflect.Type.FieldByName for unexported fields due to + // buggy behavior with regard to embeddeding and unexported fields. + // See https://golang.org/issue/4876 for details. 
+ sf = reflect.StructField{} + for i := 0; i < t.NumField() && sf.Name == ""; i++ { + if t.Field(i).Name == name { + sf = t.Field(i) + } + } + } + if sf.Name == "" { + return []string{name}, fmt.Errorf("does not exist") + } + var ss []string + for i := range sf.Index { + ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) + } + if sel == "" { + return ss, nil + } + ssPost, err := canonicalName(sf.Type, sel) + return append(ss, ssPost...), err +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go new file mode 100644 index 0000000000000..4eb49d63db311 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go @@ -0,0 +1,35 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmpopts + +import ( + "github.com/google/go-cmp/cmp" +) + +type xformFilter struct{ xform cmp.Option } + +func (xf xformFilter) filter(p cmp.Path) bool { + for _, ps := range p { + if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform { + return false + } + } + return true +} + +// AcyclicTransformer returns a Transformer with a filter applied that ensures +// that the transformer cannot be recursively applied upon its own output. +// +// An example use case is a transformer that splits a string by lines: +// AcyclicTransformer("SplitLines", func(s string) []string{ +// return strings.Split(s, "\n") +// }) +// +// Had this been an unfiltered Transformer instead, this would result in an +// infinite cycle converting a string to []string to [][]string and so on. +func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { + xf := xformFilter{cmp.Transformer(name, xformFunc)} + return cmp.FilterPath(xf.filter, xf.xform) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 868151ba14a0d..d00c1aed6aa3b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -438,7 +438,9 @@ github.com/golang/snappy # github.com/google/flatbuffers v1.11.0 github.com/google/flatbuffers/go # github.com/google/go-cmp v0.5.5 +## explicit github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function From 67ca3beb1ecde48a5a6a8befb783b1b95aa5ad8e Mon Sep 17 00:00:00 2001 From: Michael Golowka <72365+pcman312@users.noreply.github.com> Date: Wed, 12 May 2021 15:22:41 -0600 Subject: [PATCH 014/101] Check ErrPluginStaticUnsupported for fallback to RotateRootCredentials (#11585) --- builtin/logical/database/version_wrapper.go | 2 +- .../logical/database/version_wrapper_test.go | 27 ++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index d1b5359717763..e625404e52d7d 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -152,7 +152,7 @@ func (d databaseVersionWrapper) changePasswordLegacy(ctx context.Context, userna err = d.changeUserPasswordLegacy(ctx, username, passwordChange) // If changing the root user's password but SetCredentials is unimplemented, fall back to RotateRootCredentials - if isRootUser && status.Code(err) == codes.Unimplemented { + if isRootUser && (err == v4.ErrPluginStaticUnsupported || status.Code(err) == codes.Unimplemented) { saveConfig, err = d.changeRootUserPasswordLegacy(ctx, 
passwordChange) if err != nil { return nil, err diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go index 56ec37d029035..2680de93c6a6d 100644 --- a/builtin/logical/database/version_wrapper_test.go +++ b/builtin/logical/database/version_wrapper_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/mock" @@ -672,7 +673,7 @@ func TestUpdateUser_legacyDB(t *testing.T) { expectedConfig: nil, expectErr: true, }, - "change password - RotateRootCredentials": { + "change password - RotateRootCredentials (gRPC Unimplemented)": { req: v5.UpdateUserRequest{ Username: "existing_user", Password: &v5.ChangePassword{ @@ -696,6 +697,30 @@ func TestUpdateUser_legacyDB(t *testing.T) { }, expectErr: false, }, + "change password - RotateRootCredentials (ErrPluginStaticUnsupported)": { + req: v5.UpdateUserRequest{ + Username: "existing_user", + Password: &v5.ChangePassword{ + NewPassword: "newpassowrd", + }, + }, + isRootUser: true, + + setCredentialsErr: v4.ErrPluginStaticUnsupported, + setCredentialsCalls: 1, + + rotateRootConfig: map[string]interface{}{ + "foo": "bar", + }, + rotateRootCalls: 1, + + renewUserCalls: 0, + + expectedConfig: map[string]interface{}{ + "foo": "bar", + }, + expectErr: false, + }, "change password - RotateRootCredentials failed": { req: v5.UpdateUserRequest{ Username: "existing_user", From e2f47039b54f679ce4fbc262b96c22db4c43fa7b Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang <1883212+calvn@users.noreply.github.com> Date: Wed, 12 May 2021 14:59:07 -0700 Subject: [PATCH 015/101] agent/cert: properly return the cached client on AuthClient (#11576) * agent/cert: properly return the cached client on AuthClient * test: pass in nil client config, check on pointer values directly * test: pass in nil client config * changelog: add changelog entry --- changelog/11576.txt | 4 + command/agent/auth/cert/cert.go | 2 +- command/agent/auth/cert/cert_test.go | 133 ++++++++++++++++++ .../auth/cert/test-fixtures/keys/cert.pem | 22 +++ .../auth/cert/test-fixtures/keys/key.pem | 27 ++++ .../auth/cert/test-fixtures/keys/pkioutput | 74 ++++++++++ .../auth/cert/test-fixtures/root/pkioutput | 74 ++++++++++ .../auth/cert/test-fixtures/root/root.crl | 12 ++ .../cert/test-fixtures/root/rootcacert.pem | 20 +++ .../cert/test-fixtures/root/rootcakey.pem | 27 ++++ 10 files changed, 394 insertions(+), 1 deletion(-) create mode 100644 changelog/11576.txt create mode 100644 command/agent/auth/cert/cert_test.go create mode 100644 command/agent/auth/cert/test-fixtures/keys/cert.pem create mode 100644 command/agent/auth/cert/test-fixtures/keys/key.pem create mode 100644 command/agent/auth/cert/test-fixtures/keys/pkioutput create mode 100644 command/agent/auth/cert/test-fixtures/root/pkioutput create mode 100644 command/agent/auth/cert/test-fixtures/root/root.crl create mode 100644 command/agent/auth/cert/test-fixtures/root/rootcacert.pem create mode 100644 command/agent/auth/cert/test-fixtures/root/rootcakey.pem diff --git a/changelog/11576.txt b/changelog/11576.txt new file mode 100644 index 0000000000000..0886ee983f652 --- /dev/null +++ b/changelog/11576.txt @@ -0,0 +1,4 @@ +```release-note:bug +agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. 
+``` \ No newline at end of file diff --git a/command/agent/auth/cert/cert.go b/command/agent/auth/cert/cert.go index 297b71da05008..3c8162a5eacda 100644 --- a/command/agent/auth/cert/cert.go +++ b/command/agent/auth/cert/cert.go @@ -108,7 +108,7 @@ func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { // Return cached client if present if c.client != nil { - return client, nil + return c.client, nil } config := api.DefaultConfig() diff --git a/command/agent/auth/cert/cert_test.go b/command/agent/auth/cert/cert_test.go new file mode 100644 index 0000000000000..9f1378c280561 --- /dev/null +++ b/command/agent/auth/cert/cert_test.go @@ -0,0 +1,133 @@ +package cert + +import ( + "context" + "os" + "path" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agent/auth" +) + +func TestCertAuthMethod_Authenticate(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "foo", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + loginPath, _, authMap, err := method.Authenticate(context.Background(), client) + if err != nil { + t.Fatal(err) + } + + expectedLoginPath := path.Join(config.MountPath, "/login") + if loginPath != expectedLoginPath { + t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) + } + + expectedAuthMap := map[string]interface{}{ + "name": config.Config["name"], + } + if !reflect.DeepEqual(authMap, expectedAuthMap) { + t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) + } +} + +func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "without-certs", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client != clientToUse { + t.Fatal("error: expected AuthClient to return back original client") + } +} + +func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { + + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs", + "client_cert": clientCert.Name(), + "client_key": clientKey.Name(), + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back the cached client + cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + 
if err != nil { + t.Fatal(err) + } + + if cachedClient != clientToUse { + t.Fatal("expected client from AuthClient to return back a cached client") + } +} diff --git a/command/agent/auth/cert/test-fixtures/keys/cert.pem b/command/agent/auth/cert/test-fixtures/keys/cert.pem new file mode 100644 index 0000000000000..67ef67dd8d718 --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/keys/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/command/agent/auth/cert/test-fixtures/keys/key.pem b/command/agent/auth/cert/test-fixtures/keys/key.pem new file mode 100644 index 0000000000000..add982002acf7 --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/keys/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 
+xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- diff --git a/command/agent/auth/cert/test-fixtures/keys/pkioutput b/command/agent/auth/cert/test-fixtures/keys/pkioutput new file mode 100644 index 0000000000000..526ff03167b2d --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/keys/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4 +lease_duration 279359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N 
+TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- +private_key_type rsa diff --git a/command/agent/auth/cert/test-fixtures/root/pkioutput b/command/agent/auth/cert/test-fixtures/root/pkioutput new file mode 100644 index 0000000000000..312ae18deae8f --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/root/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586 +lease_duration 315359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +expiration 1.772072879e+09 +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 
+z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- +private_key_type rsa +serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d diff --git a/command/agent/auth/cert/test-fixtures/root/root.crl b/command/agent/auth/cert/test-fixtures/root/root.crl new file mode 100644 index 0000000000000..a80c9e4117cb7 --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/root/root.crl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN +MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3 +UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H +MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn +HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk +RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J +xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y +UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg +1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw +IA0= 
+-----END X509 CRL----- diff --git a/command/agent/auth/cert/test-fixtures/root/rootcacert.pem b/command/agent/auth/cert/test-fixtures/root/rootcacert.pem new file mode 100644 index 0000000000000..dcb307a140115 --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/root/rootcacert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- diff --git a/command/agent/auth/cert/test-fixtures/root/rootcakey.pem b/command/agent/auth/cert/test-fixtures/root/rootcakey.pem new file mode 100644 index 0000000000000..e950da5ba3040 --- /dev/null +++ b/command/agent/auth/cert/test-fixtures/root/rootcakey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- From f679f3028c1de8bbd2f45473bfdc8ce49c5bccb7 Mon Sep 17 
00:00:00 2001 From: Michael Golowka <72365+pcman312@users.noreply.github.com> Date: Wed, 12 May 2021 17:09:56 -0600 Subject: [PATCH 016/101] DB engine: Check ErrPluginStaticUnsupported in rollback code (#11601) --- builtin/logical/database/rollback.go | 4 +++- changelog/11585.txt | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog/11585.txt diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go index ab261b87e16c8..c8221f3c4702a 100644 --- a/builtin/logical/database/rollback.go +++ b/builtin/logical/database/rollback.go @@ -4,6 +4,8 @@ import ( "context" "errors" + "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" @@ -104,7 +106,7 @@ func (b *databaseBackend) rollbackDatabaseCredentials(ctx context.Context, confi // It actually is the root user here, but we only want to use SetCredentials since // RotateRootCredentials doesn't give any control over what password is used _, err = dbi.database.UpdateUser(ctx, updateReq, false) - if status.Code(err) == codes.Unimplemented { + if status.Code(err) == codes.Unimplemented || err == dbplugin.ErrPluginStaticUnsupported { return nil } return err diff --git a/changelog/11585.txt b/changelog/11585.txt new file mode 100644 index 0000000000000..c983802a6a8aa --- /dev/null +++ b/changelog/11585.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` +``` From f589fd5fb2bca0fbe53799d2d7574e1cefb84697 Mon Sep 17 00:00:00 2001 From: Jim Kalafut Date: Wed, 12 May 2021 16:10:32 -0700 Subject: [PATCH 017/101] changelog++ --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49d4114224aca..e8f362bb49d3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,7 @@ BUG FIXES: * storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] * ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] * ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] -* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)] +* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)] * ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] * ui: Fix status menu no showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] * ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] From 4612703ce26b9b9cc179048264a5ecd647f1c3b9 Mon Sep 17 00:00:00 2001 From: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> Date: Thu, 13 May 2021 09:55:46 -0400 Subject: [PATCH 018/101] core: updates to password policy generator (#11596) * core: fix bug in password policies not using namespaces * Add changelog --- changelog/11596.txt | 3 ++ vault/dynamic_system_view.go | 4 +- vault/dynamic_system_view_test.go | 88 ++++++++++++++++++------------- vault/password_policy_util.go | 33 ++++++++++++ 4 files changed, 89 insertions(+), 39 deletions(-) create mode 100644 changelog/11596.txt create mode 100644 vault/password_policy_util.go diff --git a/changelog/11596.txt b/changelog/11596.txt new file mode 100644 index 0000000000000..3735ca0bf8584 --- /dev/null +++ b/changelog/11596.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix plugins mounted in namespaces being unable to use password policies +``` diff --git a/vault/dynamic_system_view.go b/vault/dynamic_system_view.go index 86e8b560ba737..c9e9c16cbe5e3 100644 --- a/vault/dynamic_system_view.go +++ b/vault/dynamic_system_view.go @@ -336,11 +336,11 @@ func (d dynamicSystemView) GeneratePasswordFromPolicy(ctx context.Context, polic // Ensure there's a timeout on the context of some sort if _, hasTimeout := ctx.Deadline(); !hasTimeout { var cancel func() - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) defer cancel() } - policyCfg, err := retrievePasswordPolicy(ctx, d.core.systemBarrierView, policyName) + policyCfg, err := d.retrievePasswordPolicy(ctx, policyName) if err != nil { return "", fmt.Errorf("failed to retrieve password policy: %w", err) } diff --git a/vault/dynamic_system_view_test.go b/vault/dynamic_system_view_test.go index 2b5044d9f41cc..b7861428cf91b 100644 --- a/vault/dynamic_system_view_test.go +++ b/vault/dynamic_system_view_test.go @@ -2,7 +2,7 @@ package vault import ( "context" - "encoding/json" + "encoding/base64" "fmt" "reflect" "sort" @@ -16,6 +16,22 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +var testPolicyName = "testpolicy" +var rawTestPasswordPolicy = ` +length = 20 +rule "charset" { + charset = "abcdefghijklmnopqrstuvwxyz" + min_chars = 1 +} +rule "charset" { + charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + min_chars = 1 +} +rule "charset" { + charset = "0123456789" + min_chars = 1 +}` + func TestIdentity_BackendTemplating(t *testing.T) { var err error coreConfig := &CoreConfig{ @@ -157,47 +173,45 @@ func TestIdentity_BackendTemplating(t *testing.T) { } func TestDynamicSystemView_GeneratePasswordFromPolicy_successful(t *testing.T) { - policyName := "testpolicy" - rawPolicy := map[string]interface{}{ - "policy": `length = 20 -rule "charset" { - charset = "abcdefghijklmnopqrstuvwxyz" - min_chars = 1 -} -rule "charset" { - charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - min_chars = 1 -} -rule "charset" { - charset = "0123456789" - min_chars = 1 -}`, - } - marshalledPolicy, err := json.Marshal(rawPolicy) - if 
err != nil { - t.Fatalf("Unable to set up test: unable to marshal raw policy to JSON: %s", err) + var err error + coreConfig := &CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{}, } - testStorage := fakeBarrier{ - getEntry: &logical.StorageEntry{ - Key: getPasswordPolicyKey(policyName), - Value: marshalledPolicy, - }, - } + cluster := NewTestCluster(t, coreConfig, &TestClusterOptions{}) - dsv := dynamicSystemView{ - core: &Core{ - systemBarrierView: NewBarrierView(testStorage, "sys/"), - }, + cluster.Start() + defer cluster.Cleanup() + + core := cluster.Cores[0].Core + TestWaitActive(t, core) + + b64Policy := base64.StdEncoding.EncodeToString([]byte(rawTestPasswordPolicy)) + + path := fmt.Sprintf("sys/policies/password/%s", testPolicyName) + req := logical.TestRequest(t, logical.CreateOperation, path) + req.ClientToken = cluster.RootToken + req.Data["policy"] = b64Policy + + _, err = core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) } + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() + ctx = namespace.RootContext(ctx) + dsv := dynamicSystemView{core: cluster.Cores[0].Core} + runeset := map[rune]bool{} runesFound := []rune{} for i := 0; i < 100; i++ { - actual, err := dsv.GeneratePasswordFromPolicy(ctx, policyName) + actual, err := dsv.GeneratePasswordFromPolicy(ctx, testPolicyName) if err != nil { t.Fatalf("no error expected, but got: %s", err) } @@ -220,12 +234,6 @@ rule "charset" { } } -type runes []rune - -func (r runes) Len() int { return len(r) } -func (r runes) Less(i, j int) bool { return r[i] < r[j] } -func (r runes) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - func TestDynamicSystemView_GeneratePasswordFromPolicy_failed(t *testing.T) { type testCase struct { policyName string @@ -282,6 +290,12 @@ func TestDynamicSystemView_GeneratePasswordFromPolicy_failed(t *testing.T) { } } +type runes []rune + +func (r runes) Len() int { return len(r) } +func (r runes) Less(i, j int) bool { return r[i] < r[j] } +func (r runes) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + type fakeBarrier struct { getEntry *logical.StorageEntry getErr error diff --git a/vault/password_policy_util.go b/vault/password_policy_util.go new file mode 100644 index 0000000000000..133373677f1c1 --- /dev/null +++ b/vault/password_policy_util.go @@ -0,0 +1,33 @@ +// +build !enterprise + +package vault + +import ( + "context" + "encoding/json" + "fmt" +) + +const ( + passwordPolicySubPath = "password_policy/" +) + +// retrievePasswordPolicy retrieves a password policy from the logical storage +func (d dynamicSystemView) retrievePasswordPolicy(ctx context.Context, policyName string) (*passwordPolicyConfig, error) { + storage := d.core.systemBarrierView.SubView(passwordPolicySubPath) + entry, err := storage.Get(ctx, policyName) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + policyCfg := &passwordPolicyConfig{} + err = json.Unmarshal(entry.Value, &policyCfg) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal stored data: %w", err) + } + + return policyCfg, nil +} From c8fe8984ebdb3a026c45d507699c4ffdfe426e63 Mon Sep 17 00:00:00 2001 From: Pierce Bartine Date: Thu, 13 May 2021 07:18:15 -0700 Subject: [PATCH 019/101] Add ServerName to Vault Agent template config (#11288) * Add ServerName to Vault Agent template config * Remove newline * Add changelog for 11288 * Update changelog/11288.txt Co-authored-by: 
Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> Co-authored-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> --- changelog/11288.txt | 3 +++ command/agent/template/template.go | 13 +++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) create mode 100644 changelog/11288.txt diff --git a/changelog/11288.txt b/changelog/11288.txt new file mode 100644 index 0000000000000..6f0e95c8f53af --- /dev/null +++ b/changelog/11288.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fixed agent templating to use configured tls servername values +``` diff --git a/command/agent/template/template.go b/command/agent/template/template.go index 6591e9a189200..9396d1f82116d 100644 --- a/command/agent/template/template.go +++ b/command/agent/template/template.go @@ -274,12 +274,13 @@ func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctc skipVerify := sc.AgentConfig.Vault.TLSSkipVerify verify := !skipVerify conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(true), - Verify: &verify, - Cert: &sc.AgentConfig.Vault.ClientCert, - Key: &sc.AgentConfig.Vault.ClientKey, - CaCert: &sc.AgentConfig.Vault.CACert, - CaPath: &sc.AgentConfig.Vault.CAPath, + Enabled: pointerutil.BoolPtr(true), + Verify: &verify, + Cert: &sc.AgentConfig.Vault.ClientCert, + Key: &sc.AgentConfig.Vault.ClientKey, + CaCert: &sc.AgentConfig.Vault.CACert, + CaPath: &sc.AgentConfig.Vault.CAPath, + ServerName: &sc.AgentConfig.Vault.TLSServerName, } } enabled := attempts > 0 From 0ef61459b82d1c4287e3398540c0e3114fc2a3c8 Mon Sep 17 00:00:00 2001 From: Robison Jacka Date: Thu, 13 May 2021 14:37:22 -0700 Subject: [PATCH 020/101] Add support for templated values in SSH CA DefaultExtensions. (#11495) * Add support for templated values in SSH CA DefaultExtensions. * Reworking the logic per feedback, adding basic test. * Adding test, so we cover both default extension templating & ignoring default when user-provided extensions are present. * Fixed up an unintentional extension handling defect, added test to cover the case. * Refactor Default Extension tests into `enabled` and `disabled`. 
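A minimal sketch of how a role using the new `default_extensions_template` option might be written through the Go API client; the mount path, role name, and userpass accessor placeholder are illustrative, while the field names and template syntax follow the test added in this change:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With default_extensions_template enabled, templated values in
	// default_extensions are rendered per request from the caller's identity.
	_, err = client.Logical().Write("ssh/roles/example", map[string]interface{}{
		"key_type":                    "ca",
		"allow_user_certificates":     true,
		"allowed_users":               "tuber",
		"default_user":                "tuber",
		"default_extensions_template": true,
		"default_extensions": map[string]interface{}{
			// auth_userpass_XXXX stands in for the real userpass mount accessor.
			"login@foobar.com": "{{identity.entity.aliases.auth_userpass_XXXX.name}}",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```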
--- builtin/logical/ssh/backend_test.go | 253 +++++++++++++++++++++++++++- builtin/logical/ssh/path_roles.go | 136 ++++++++------- builtin/logical/ssh/path_sign.go | 52 ++++-- changelog/11495.txt | 3 + 4 files changed, 366 insertions(+), 78 deletions(-) create mode 100644 changelog/11495.txt diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index baf60e05532da..b4253ba1ce0bf 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -19,8 +19,10 @@ import ( "golang.org/x/crypto/ssh" + "github.com/hashicorp/vault/builtin/credential/userpass" "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" ) @@ -122,6 +124,7 @@ SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID dockerImageTagSupportsRSA1 = "8.1_p1-r0-ls20" dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48" + ) func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) { @@ -158,7 +161,7 @@ func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), str // Install util-linux for non-busybox flock that supports timeout option err = testSSH("vaultssh", sshAddress, ssh.PublicKeys(signer), fmt.Sprintf(` - set -e; + set -e; sudo ln -s /config /home/vaultssh sudo apk add util-linux; echo "LogLevel DEBUG" | sudo tee -a /config/ssh_host_keys/sshd_config; @@ -1318,6 +1321,252 @@ func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) { logicaltest.Test(t, testCase) } +func TestBackend_DefExtTemplatingEnabled(t *testing.T) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Write SSH role. + _, err = client.Logical().Write("ssh/roles/test", map[string]interface{}{ + "key_type": "ca", + "allowed_extensions": "login@zipzap.com", + "allow_user_certificates": true, + "allowed_users": "tuber", + "default_user": "tuber", + "default_extensions_template": true, + "default_extensions": map[string]interface{}{ + "login@foobar.com": "{{identity.entity.aliases." 
+ userpassAccessor + ".name}}", + }, + }) + if err != nil { + t.Fatal(err) + } + + sshKeyID := "vault-userpass-"+testUserName+"-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0" + + // Issue SSH certificate with default extensions templating enabled, and no user-provided extensions + client.SetToken(userpassToken) + resp, err := client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + }) + if err != nil { + t.Fatal(err) + } + signedKey := resp.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + defaultExtensionPermissions := map[string]string{ + "login@foobar.com": testUserName, + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating enabled, and user-provided extensions + // The certificate should only have the user-provided extensions, and no templated extensions + userProvidedExtensionPermissions := map[string]string{ + "login@zipzap.com": "some_other_user_name", + } + resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": userProvidedExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey = resp.Data["signed_key"].(string) + key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err = ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating enabled, and invalid user-provided extensions - it should fail + invalidUserProvidedExtensionPermissions := map[string]string{ + "login@foobar.com": "{{identity.entity.metadata}}", + } + resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": invalidUserProvidedExtensionPermissions, + }) + if err == nil { + t.Fatal("expected an error while attempting to sign a key with invalid permissions") + } +} + +func TestBackend_DefExtTemplatingDisabled(t *testing.T) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Write SSH role to test with any extension. We also provide a templated default extension, + // to verify that it's not actually being evaluated + _, err = client.Logical().Write("ssh/roles/test_allow_all_extensions", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_users": "tuber", + "default_user": "tuber", + "default_extensions_template": false, + "default_extensions": map[string]interface{}{ + "login@foobar.com": "{{identity.entity.aliases." 
+ userpassAccessor + ".name}}", + }, + }) + if err != nil { + t.Fatal(err) + } + + sshKeyID := "vault-userpass-"+testUserName+"-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0" + +// Issue SSH certificate with default extensions templating disabled, and no user-provided extensions + client.SetToken(userpassToken) + defaultExtensionPermissions := map[string]string{ + "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}", + "login@zipzap.com": "some_other_user_name", + } + resp, err := client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": defaultExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey := resp.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating disabled, and user-provided extensions + client.SetToken(userpassToken) + userProvidedAnyExtensionPermissions := map[string]string{ + "login@foobar.com": "not_userpassname", + "login@zipzap.com": "some_other_user_name", + } + resp, err = client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": userProvidedAnyExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey = resp.Data["signed_key"].(string) + key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err = ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedAnyExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } +} + +func getSshCaTestCluster(t *testing.T, userIdentity string) (*vault.TestCluster, string) { + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "ssh": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + client := cluster.Cores[0].Client + + // Write test policy for userpass auth method. + err := client.Sys().PutPolicy("test", ` + path "ssh/*" { + capabilities = ["update"] + }`) + if err != nil { + t.Fatal(err) + } + + // Enable userpass auth method. + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + + // Configure test role for userpass. + if _, err := client.Logical().Write("auth/userpass/users/"+userIdentity, map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and keep client token. + secret, err := client.Logical().Write("auth/userpass/login/"+userIdentity, map[string]interface{}{ + "password": "test", + }) + if err != nil || secret == nil { + t.Fatal(err) + } + userpassToken := secret.Auth.ClientToken + + // Mount SSH. 
+ err = client.Sys().Mount("ssh", &api.MountInput{ + Type: "ssh", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Configure SSH CA. + _, err = client.Logical().Write("ssh/config/ca", map[string]interface{}{ + "public_key": testCAPublicKey, + "private_key": testCAPrivateKey, + }) + if err != nil { + t.Fatal(err) + } + + return cluster, userpassToken +} + func configCaStep(caPublicKey, caPrivateKey string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, @@ -1391,7 +1640,7 @@ func validateSSHCertificate(cert *ssh.Certificate, keyID string, certType int, v actualTTL := time.Unix(int64(cert.ValidBefore), 0).Add(-30 * time.Second).Sub(time.Unix(int64(cert.ValidAfter), 0)) if actualTTL != ttl { - return fmt.Errorf("incorrect ttl: expected: %v, actualL %v", ttl, actualTTL) + return fmt.Errorf("incorrect ttl: expected: %v, actual %v", ttl, actualTTL) } if !reflect.DeepEqual(cert.ValidPrincipals, validPrincipals) { diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 5a4f47cb3033a..0b1ef84ec6af8 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -26,33 +26,34 @@ const ( // for both OTP and Dynamic roles. Not all the fields are mandatory for both type. // Some are applicable for one and not for other. It doesn't matter. type sshRole struct { - KeyType string `mapstructure:"key_type" json:"key_type"` - KeyName string `mapstructure:"key" json:"key"` - KeyBits int `mapstructure:"key_bits" json:"key_bits"` - AdminUser string `mapstructure:"admin_user" json:"admin_user"` - DefaultUser string `mapstructure:"default_user" json:"default_user"` - CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` - ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` - Port int `mapstructure:"port" json:"port"` - InstallScript string `mapstructure:"install_script" json:"install_script"` - AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` - AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` - AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` - KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"` - MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` - TTL string `mapstructure:"ttl" json:"ttl"` - DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"` - DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"` - AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"` - AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"` - AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"` - AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"` - AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"` - AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"` - AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"` - KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"` - AllowedUserKeyLengths map[string]int `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths"` - AlgorithmSigner string 
`mapstructure:"algorithm_signer" json:"algorithm_signer"` + KeyType string `mapstructure:"key_type" json:"key_type"` + KeyName string `mapstructure:"key" json:"key"` + KeyBits int `mapstructure:"key_bits" json:"key_bits"` + AdminUser string `mapstructure:"admin_user" json:"admin_user"` + DefaultUser string `mapstructure:"default_user" json:"default_user"` + CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` + ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` + Port int `mapstructure:"port" json:"port"` + InstallScript string `mapstructure:"install_script" json:"install_script"` + AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` + AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` + AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` + KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"` + MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` + TTL string `mapstructure:"ttl" json:"ttl"` + DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"` + DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"` + DefaultExtensionsTemplate bool `mapstructure:"default_extensions_template" json:"default_extensions_template"` + AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"` + AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"` + AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"` + AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"` + AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"` + AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"` + AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"` + KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"` + AllowedUserKeyLengths map[string]int `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths"` + AlgorithmSigner string `mapstructure:"algorithm_signer" json:"algorithm_signer"` } func pathListRoles(b *backend) *framework.Path { @@ -267,6 +268,15 @@ func pathRoles(b *backend) *framework.Path { "allowed_extensions". Defaults to none. `, }, + "default_extensions_template": { + Type: framework.TypeBool, + Description: ` + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + If set, Default extension values can be specified using identity template policies. + Non-templated extension values are also permitted. + `, + Default: false, + }, "allow_user_certificates": { Type: framework.TypeBool, Description: ` @@ -334,7 +344,7 @@ func pathRoles(b *backend) *framework.Path { "algorithm_signer": { Type: framework.TypeString, Description: ` - When supplied, this value specifies a signing algorithm for the key. Possible values: + When supplied, this value specifies a signing algorithm for the key. Possible values: ssh-rsa, rsa-sha2-256, rsa-sha2-512. 
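[Editor's aside on the `default_extensions_template` field described above: the patch only renders an extension value as an identity template when the value begins with `{{` and ends with `}}`; anything else is passed through verbatim. A minimal, self-contained sketch of that marker check; the alias accessor name below is hypothetical and not taken from the patch:]

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same marker pattern the patch uses in calculateExtensions: a value is
	// only rendered as an identity template when it starts with {{ and ends with }}.
	templated := regexp.MustCompile(`^{{.+?}}$`)
	for _, v := range []string{
		"{{identity.entity.aliases.auth_userpass_1234.name}}", // rendered per entity
		"static-user",                     // used verbatim
		"prefix-{{identity.entity.name}}", // not rendered: value must begin with {{ and end with }}
	} {
		fmt.Printf("%-55q templated=%v\n", v, templated.MatchString(v))
	}
}
```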
`, DisplayAttrs: &framework.DisplayAttributes{ @@ -514,20 +524,21 @@ func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *f ttl := time.Duration(data.Get("ttl").(int)) * time.Second maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second role := &sshRole{ - AllowedCriticalOptions: data.Get("allowed_critical_options").(string), - AllowedExtensions: data.Get("allowed_extensions").(string), - AllowUserCertificates: data.Get("allow_user_certificates").(bool), - AllowHostCertificates: data.Get("allow_host_certificates").(bool), - AllowedUsers: allowedUsers, - AllowedUsersTemplate: data.Get("allowed_users_template").(bool), - AllowedDomains: data.Get("allowed_domains").(string), - DefaultUser: defaultUser, - AllowBareDomains: data.Get("allow_bare_domains").(bool), - AllowSubdomains: data.Get("allow_subdomains").(bool), - AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool), - KeyIDFormat: data.Get("key_id_format").(string), - KeyType: KeyTypeCA, - AlgorithmSigner: signer, + AllowedCriticalOptions: data.Get("allowed_critical_options").(string), + AllowedExtensions: data.Get("allowed_extensions").(string), + AllowUserCertificates: data.Get("allow_user_certificates").(bool), + AllowHostCertificates: data.Get("allow_host_certificates").(bool), + AllowedUsers: allowedUsers, + AllowedUsersTemplate: data.Get("allowed_users_template").(bool), + AllowedDomains: data.Get("allowed_domains").(string), + DefaultUser: defaultUser, + AllowBareDomains: data.Get("allow_bare_domains").(bool), + AllowSubdomains: data.Get("allow_subdomains").(bool), + AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool), + DefaultExtensionsTemplate: data.Get("default_extensions_template").(bool), + KeyIDFormat: data.Get("key_id_format").(string), + KeyType: KeyTypeCA, + AlgorithmSigner: signer, } if !role.AllowUserCertificates && !role.AllowHostCertificates { @@ -600,26 +611,27 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { } result = map[string]interface{}{ - "allowed_users": role.AllowedUsers, - "allowed_users_template": role.AllowedUsersTemplate, - "allowed_domains": role.AllowedDomains, - "default_user": role.DefaultUser, - "ttl": int64(ttl.Seconds()), - "max_ttl": int64(maxTTL.Seconds()), - "allowed_critical_options": role.AllowedCriticalOptions, - "allowed_extensions": role.AllowedExtensions, - "allow_user_certificates": role.AllowUserCertificates, - "allow_host_certificates": role.AllowHostCertificates, - "allow_bare_domains": role.AllowBareDomains, - "allow_subdomains": role.AllowSubdomains, - "allow_user_key_ids": role.AllowUserKeyIDs, - "key_id_format": role.KeyIDFormat, - "key_type": role.KeyType, - "key_bits": role.KeyBits, - "default_critical_options": role.DefaultCriticalOptions, - "default_extensions": role.DefaultExtensions, - "allowed_user_key_lengths": role.AllowedUserKeyLengths, - "algorithm_signer": role.AlgorithmSigner, + "allowed_users": role.AllowedUsers, + "allowed_users_template": role.AllowedUsersTemplate, + "allowed_domains": role.AllowedDomains, + "default_user": role.DefaultUser, + "ttl": int64(ttl.Seconds()), + "max_ttl": int64(maxTTL.Seconds()), + "allowed_critical_options": role.AllowedCriticalOptions, + "allowed_extensions": role.AllowedExtensions, + "allow_user_certificates": role.AllowUserCertificates, + "allow_host_certificates": role.AllowHostCertificates, + "allow_bare_domains": role.AllowBareDomains, + "allow_subdomains": role.AllowSubdomains, + "allow_user_key_ids": role.AllowUserKeyIDs, + "key_id_format": 
role.KeyIDFormat, + "key_type": role.KeyType, + "key_bits": role.KeyBits, + "default_critical_options": role.DefaultCriticalOptions, + "default_extensions": role.DefaultExtensions, + "default_extensions_template": role.DefaultExtensionsTemplate, + "allowed_user_key_lengths": role.AllowedUserKeyLengths, + "algorithm_signer": role.AlgorithmSigner, } case KeyTypeDynamic: result = map[string]interface{}{ diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 8ab26f0c98204..acd7d2118bb38 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -155,7 +155,7 @@ func (b *backend) pathSignCertificate(ctx context.Context, req *logical.Request, return logical.ErrorResponse(err.Error()), nil } - extensions, err := b.calculateExtensions(data, role) + extensions, err := b.calculateExtensions(data, req, role) if err != nil { return logical.ErrorResponse(err.Error()), nil } @@ -356,27 +356,51 @@ func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshR return criticalOptions, nil } -func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole) (map[string]string, error) { +func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, error) { unparsedExtensions := data.Get("extensions").(map[string]interface{}) - if len(unparsedExtensions) == 0 { - return role.DefaultExtensions, nil - } + extensions := make(map[string]string) - extensions := convertMapToStringValue(unparsedExtensions) + if len(unparsedExtensions) > 0 { + extensions := convertMapToStringValue(unparsedExtensions) + if role.AllowedExtensions != "" { + notAllowed := []string{} + allowedExtensions := strings.Split(role.AllowedExtensions, ",") - if role.AllowedExtensions != "" { - notAllowed := []string{} - allowedExtensions := strings.Split(role.AllowedExtensions, ",") + for extensionKey, _ := range extensions { + if !strutil.StrListContains(allowedExtensions, extensionKey) { + notAllowed = append(notAllowed, extensionKey) + } + } - for extension := range extensions { - if !strutil.StrListContains(allowedExtensions, extension) { - notAllowed = append(notAllowed, extension) + if len(notAllowed) != 0 { + return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed) } } + return extensions, nil + } - if len(notAllowed) != 0 { - return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed) + if role.DefaultExtensionsTemplate { + for extensionKey, extensionValue := range role.DefaultExtensions { + // Look for templating markers {{ .* }} + matched, _ := regexp.MatchString(`^{{.+?}}$`, extensionValue) + if matched { + if req.EntityID != "" { + // Retrieve extension value based on template + entityID from request. 
+ templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System()) + if err == nil { + // Template returned an extension value that we can use + extensions[extensionKey] = templateExtensionValue + } else { + return nil, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) + } + } + } else { + // Static extension value or err template + extensions[extensionKey] = extensionValue + } } + } else { + extensions = role.DefaultExtensions } return extensions, nil diff --git a/changelog/11495.txt b/changelog/11495.txt new file mode 100644 index 0000000000000..d529872f3bdc9 --- /dev/null +++ b/changelog/11495.txt @@ -0,0 +1,3 @@ +```release-note:feature +ssh: add support for templated values in SSH CA DefaultExtensions +``` \ No newline at end of file From a71eebea012302e5ef0d44ac18d18d19835140d7 Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Thu, 13 May 2021 17:02:25 -0500 Subject: [PATCH 021/101] Don't backoff if a listener error was a timeout (#11594) --- vault/cluster/cluster.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vault/cluster/cluster.go b/vault/cluster/cluster.go index 016e73c32abd5..7b5c47cfb129a 100644 --- a/vault/cluster/cluster.go +++ b/vault/cluster/cluster.go @@ -301,12 +301,17 @@ func (cl *Listener) Run(ctx context.Context) error { // Accept the connection conn, err := tlsLn.Accept() if err != nil { - if err, ok := err.(net.Error); ok && !err.Timeout() { + err, ok := err.(net.Error) + if ok && !err.Timeout() { cl.logger.Debug("non-timeout error accepting on cluster port", "error", err) } if conn != nil { conn.Close() } + if ok && err.Timeout() { + loopDelay = 0 + continue + } if loopDelay == 0 { loopDelay = baseDelay From 8fdd3f450e0d329f62a661154e22bf97f7b4e3a7 Mon Sep 17 00:00:00 2001 From: Michael Golowka <72365+pcman312@users.noreply.github.com> Date: Mon, 17 May 2021 11:40:35 -0600 Subject: [PATCH 022/101] Add ability to customize some timeouts in MongoDB database plugin (#11600) --- .../database/path_rotate_credentials.go | 16 +-- changelog/11600.txt | 9 ++ .../database/mongodb/connection_producer.go | 106 ++++++++++++++++-- .../mongodb/connection_producer_test.go | 2 +- plugins/database/mongodb/mongodb.go | 59 +++------- 5 files changed, 127 insertions(+), 65 deletions(-) create mode 100644 changelog/11600.txt diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index 84ed3db8d3ba9..5774ea8634600 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -78,6 +78,14 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF return nil, err } + // Take out the backend lock since we are swapping out the connection + b.Lock() + defer b.Unlock() + + // Take the write lock on the instance + dbi.Lock() + defer dbi.Unlock() + defer func() { // Close the plugin dbi.closed = true @@ -88,14 +96,6 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF delete(b.connections, name) }() - // Take out the backend lock since we are swapping out the connection - b.Lock() - defer b.Unlock() - - // Take the write lock on the instance - dbi.Lock() - defer dbi.Unlock() - // Generate new credentials oldPassword := config.ConnectionDetails["password"].(string) newPassword, err := dbi.database.GeneratePassword(ctx, b.System(), config.PasswordPolicy) diff --git a/changelog/11600.txt b/changelog/11600.txt new file 
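[Editor's note on the `path_rotate_credentials.go` hunk just above: the race fix relies on Go running deferred calls in last-in-first-out order. Because the lock acquisitions (and their deferred `Unlock`s) are now registered before the connection-cleanup `defer`, the cleanup runs first on return, while both locks are still held; previously it ran after the locks had already been released. A tiny standalone illustration of that ordering rule, not part of the patch:]

```go
package main

import "fmt"

// Deferred calls run last-in-first-out, so the defer registered last fires first.
func main() {
	defer fmt.Println("3: released backend lock")
	defer fmt.Println("2: released instance lock")
	defer fmt.Println("1: cleanup runs while both locks are still held")
	fmt.Println("0: function body")
}

// Output:
// 0: function body
// 1: cleanup runs while both locks are still held
// 2: released instance lock
// 3: released backend lock
```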
mode 100644 index 0000000000000..f40d4bc4537ba --- /dev/null +++ b/changelog/11600.txt @@ -0,0 +1,9 @@ +```release-note:improvement +secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` +``` +```release-note:improvement +secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB +``` +```release-note:bug +secrets/database: Fixed minor race condition when rotate-root is called +``` diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index f160c0a043dd8..1f0c312fa9766 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/mitchellh/mapstructure" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -31,6 +32,10 @@ type mongoDBConnectionProducer struct { TLSCertificateKeyData []byte `json:"tls_certificate_key" structs:"-" mapstructure:"tls_certificate_key"` TLSCAData []byte `json:"tls_ca" structs:"-" mapstructure:"tls_ca"` + SocketTimeout time.Duration `json:"socket_timeout" structs:"-" mapstructure:"socket_timeout"` + ConnectTimeout time.Duration `json:"connect_timeout" structs:"-" mapstructure:"connect_timeout"` + ServerSelectionTimeout time.Duration `json:"server_selection_timeout" structs:"-" mapstructure:"server_selection_timeout"` + Initialized bool RawConfig map[string]interface{} Type string @@ -48,15 +53,47 @@ type writeConcern struct { J bool // Sync via the journal if present } +func (c *mongoDBConnectionProducer) loadConfig(cfg map[string]interface{}) error { + err := mapstructure.WeakDecode(cfg, c) + if err != nil { + return err + } + + if len(c.ConnectionURL) == 0 { + return fmt.Errorf("connection_url cannot be empty") + } + + if c.SocketTimeout < 0 { + return fmt.Errorf("socket_timeout must be >= 0") + } + if c.ConnectTimeout < 0 { + return fmt.Errorf("connect_timeout must be >= 0") + } + if c.ServerSelectionTimeout < 0 { + return fmt.Errorf("server_selection_timeout must be >= 0") + } + + opts, err := c.makeClientOpts() + if err != nil { + return err + } + + c.clientOptions = opts + + return nil +} + // Connection creates or returns an existing a database connection. If the session fails // on a ping check, the session will be closed and then re-created. -// This method does not lock the mutex and it is intended that this is the callers -// responsibility. -func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{}, error) { +// This method does locks the mutex on its own. 
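[Editor's aside on the new `socket_timeout`, `connect_timeout`, and `server_selection_timeout` fields introduced earlier in this file's diff: they travel in the weakly-typed config map handed to the plugin and are decoded with `mapstructure.WeakDecode`, as `loadConfig` does above. A minimal sketch of that decoding step, assuming the caller supplies `time.Duration` values; the struct here is a stand-in for illustration, not the plugin's own type:]

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// Stand-in struct mirroring the mapstructure tags added in this patch.
type timeouts struct {
	SocketTimeout          time.Duration `mapstructure:"socket_timeout"`
	ConnectTimeout         time.Duration `mapstructure:"connect_timeout"`
	ServerSelectionTimeout time.Duration `mapstructure:"server_selection_timeout"`
}

func main() {
	cfg := map[string]interface{}{
		"socket_timeout":           30 * time.Second,
		"connect_timeout":          15 * time.Second,
		"server_selection_timeout": 5 * time.Second,
	}
	var t timeouts
	if err := mapstructure.WeakDecode(cfg, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.SocketTimeout, t.ConnectTimeout, t.ServerSelectionTimeout) // 30s 15s 5s
}
```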
+func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (*mongo.Client, error) { if !c.Initialized { return nil, connutil.ErrNotInitialized } + c.Mutex.Lock() + defer c.Mutex.Unlock() + if c.client != nil { if err := c.client.Ping(ctx, readpref.Primary()); err == nil { return c.client, nil @@ -65,8 +102,7 @@ func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{} _ = c.client.Disconnect(ctx) } - connURL := c.getConnectionURL() - client, err := createClient(ctx, connURL, c.clientOptions) + client, err := c.createClient(ctx) if err != nil { return nil, err } @@ -74,14 +110,14 @@ func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (interface{} return c.client, nil } -func createClient(ctx context.Context, connURL string, clientOptions *options.ClientOptions) (client *mongo.Client, err error) { - if clientOptions == nil { - clientOptions = options.Client() +func (c *mongoDBConnectionProducer) createClient(ctx context.Context) (client *mongo.Client, err error) { + if !c.Initialized { + return nil, fmt.Errorf("failed to create client: connection producer is not initialized") } - clientOptions.SetSocketTimeout(1 * time.Minute) - clientOptions.SetConnectTimeout(1 * time.Minute) - - client, err = mongo.Connect(ctx, options.MergeClientOptions(options.Client().ApplyURI(connURL), clientOptions)) + if c.clientOptions == nil { + return nil, fmt.Errorf("missing client options") + } + client, err = mongo.Connect(ctx, options.MergeClientOptions(options.Client().ApplyURI(c.getConnectionURL()), c.clientOptions)) if err != nil { return nil, err } @@ -120,6 +156,26 @@ func (c *mongoDBConnectionProducer) getConnectionURL() (connURL string) { return connURL } +func (c *mongoDBConnectionProducer) makeClientOpts() (*options.ClientOptions, error) { + writeOpts, err := c.getWriteConcern() + if err != nil { + return nil, err + } + + authOpts, err := c.getTLSAuth() + if err != nil { + return nil, err + } + + timeoutOpts, err := c.timeoutOpts() + if err != nil { + return nil, err + } + + opts := options.MergeClientOptions(writeOpts, authOpts, timeoutOpts) + return opts, nil +} + func (c *mongoDBConnectionProducer) getWriteConcern() (opts *options.ClientOptions, err error) { if c.WriteConcern == "" { return nil, nil @@ -206,3 +262,29 @@ func (c *mongoDBConnectionProducer) getTLSAuth() (opts *options.ClientOptions, e opts.SetTLSConfig(tlsConfig) return opts, nil } + +func (c *mongoDBConnectionProducer) timeoutOpts() (opts *options.ClientOptions, err error) { + opts = options.Client() + + if c.SocketTimeout < 0 { + return nil, fmt.Errorf("socket_timeout must be >= 0") + } + + if c.SocketTimeout == 0 { + opts.SetSocketTimeout(1 * time.Minute) + } else { + opts.SetSocketTimeout(c.SocketTimeout) + } + + if c.ConnectTimeout == 0 { + opts.SetConnectTimeout(1 * time.Minute) + } else { + opts.SetConnectTimeout(c.ConnectTimeout) + } + + if c.ServerSelectionTimeout != 0 { + opts.SetServerSelectionTimeout(c.ServerSelectionTimeout) + } + + return opts, nil +} diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go index c39914cc537ff..4b0ccaf2514a7 100644 --- a/plugins/database/mongodb/connection_producer_test.go +++ b/plugins/database/mongodb/connection_producer_test.go @@ -103,7 +103,7 @@ net: "connectionStatus": 1, } - client, err := mongo.getConnection(ctx) + client, err := mongo.Connection(ctx) if err != nil { t.Fatalf("Unable to make connection to Mongo: %s", err) } diff --git 
a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go index bfd8d4a3ca136..884f17dbe23af 100644 --- a/plugins/database/mongodb/mongodb.go +++ b/plugins/database/mongodb/mongodb.go @@ -7,14 +7,12 @@ import ( "io" "strings" + log "github.com/hashicorp/go-hclog" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/helper/template" - - dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" - "github.com/mitchellh/mapstructure" "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" @@ -57,15 +55,6 @@ func (m *MongoDB) Type() (string, error) { return mongoDBTypeName, nil } -func (m *MongoDB) getConnection(ctx context.Context) (*mongo.Client, error) { - client, err := m.Connection(ctx) - if err != nil { - return nil, err - } - - return client.(*mongo.Client), nil -} - func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { m.Lock() defer m.Unlock() @@ -91,41 +80,27 @@ func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) } - err = mapstructure.WeakDecode(req.Config, m.mongoDBConnectionProducer) - if err != nil { - return dbplugin.InitializeResponse{}, err - } - - if len(m.ConnectionURL) == 0 { - return dbplugin.InitializeResponse{}, fmt.Errorf("connection_url cannot be empty-mongo fail") - } - - writeOpts, err := m.getWriteConcern() - if err != nil { - return dbplugin.InitializeResponse{}, err - } - - authOpts, err := m.getTLSAuth() + err = m.mongoDBConnectionProducer.loadConfig(req.Config) if err != nil { return dbplugin.InitializeResponse{}, err } - m.clientOptions = options.MergeClientOptions(writeOpts, authOpts) - // Set initialized to true at this point since all fields are set, // and the connection can be established at a later time. 
m.Initialized = true if req.VerifyConnection { - _, err := m.Connection(ctx) + client, err := m.mongoDBConnectionProducer.createClient(ctx) if err != nil { return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err) } - err = m.client.Ping(ctx, readpref.Primary()) + err = client.Ping(ctx, readpref.Primary()) if err != nil { + _ = client.Disconnect(ctx) // Try to prevent any sort of resource leak return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err) } + m.mongoDBConnectionProducer.client = client } resp := dbplugin.InitializeResponse{ @@ -135,10 +110,6 @@ func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest } func (m *MongoDB) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { - // Grab the lock - m.Lock() - defer m.Unlock() - if len(req.Statements.Commands) == 0 { return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement } @@ -189,9 +160,6 @@ func (m *MongoDB) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest } func (m *MongoDB) changeUserPassword(ctx context.Context, username, password string) error { - m.Lock() - defer m.Unlock() - connURL := m.getConnectionURL() cs, err := connstring.Parse(connURL) if err != nil { @@ -218,9 +186,6 @@ func (m *MongoDB) changeUserPassword(ctx context.Context, username, password str } func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { - m.Lock() - defer m.Unlock() - // If no revocation statements provided, pass in empty JSON var revocationStatement string switch len(req.Statements.Commands) { @@ -251,6 +216,12 @@ func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest } err = m.runCommandWithRetry(ctx, db, dropUserCmd) + cErr, ok := err.(mongo.CommandError) + if ok && cErr.Name == "UserNotFound" { // User already removed, don't retry needlessly + log.Default().Warn("MongoDB user was deleted prior to lease revocation", "user", req.Username) + return dbplugin.DeleteUserResponse{}, nil + } + return dbplugin.DeleteUserResponse{}, err } @@ -258,7 +229,7 @@ func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest // on the first attempt. This should be called with the lock held func (m *MongoDB) runCommandWithRetry(ctx context.Context, db string, cmd interface{}) error { // Get the client - client, err := m.getConnection(ctx) + client, err := m.Connection(ctx) if err != nil { return err } @@ -273,7 +244,7 @@ func (m *MongoDB) runCommandWithRetry(ctx context.Context, db string, cmd interf return nil case err == io.EOF, strings.Contains(err.Error(), "EOF"): // Call getConnection to reset and retry query if we get an EOF error on first attempt. 
- client, err = m.getConnection(ctx) + client, err = m.Connection(ctx) if err != nil { return err } From 4c8a8189e4180d6c087203fe3838a45ded416385 Mon Sep 17 00:00:00 2001 From: Ricardo Cardenas Date: Mon, 17 May 2021 11:03:09 -0700 Subject: [PATCH 023/101] feat(aws): add ability to provide a role session name when generating STS credentials (#11345) * feat(aws): add ability to provide a sessionName to sts credentials Co-authored-by: Brad Vernon Co-authored-by: Jim Kalafut Co-authored-by: Tom Proctor --- builtin/logical/aws/path_user.go | 7 ++++++- builtin/logical/aws/secret_access_keys.go | 22 ++++++++++++++++------ changelog/11345.txt | 3 +++ website/content/api-docs/secret/aws.mdx | 7 ++++++- 4 files changed, 31 insertions(+), 8 deletions(-) create mode 100644 changelog/11345.txt diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index e98ac8e605a74..05c8730c353a4 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -32,6 +32,10 @@ func pathUser(b *backend) *framework.Path { Description: "Lifetime of the returned credentials in seconds", Default: 3600, }, + "role_session_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Session name to use when assuming role. Max chars: 64", + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -80,6 +84,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr } roleArn := d.Get("role_arn").(string) + roleSessionName := d.Get("role_session_name").(string) var credentialType string switch { @@ -125,7 +130,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr case !strutil.StrListContains(role.RoleArns, roleArn): return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil } - return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl) + return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName) case federationTokenCred: return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl) default: diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index 6e6ee9a6a1725..327eafd9b367a 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -141,7 +141,7 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, func (b *backend) assumeRole(ctx context.Context, s logical.Storage, displayName, roleName, roleArn, policy string, policyARNs []string, - iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error) { + iamGroups []string, lifeTimeInSeconds int64, roleSessionName string) (*logical.Response, error) { // grab any IAM group policies associated with the vault role, both inline // and managed @@ -165,10 +165,19 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, return logical.ErrorResponse(err.Error()), nil } - username, usernameWarning := genUsername(displayName, roleName, "iam_user") + roleSessionNameWarning := "" + if roleSessionName == "" { + roleSessionName, roleSessionNameWarning = genUsername(displayName, roleName, "iam_user") + } else { + roleSessionName = normalizeDisplayName(roleSessionName) + if len(roleSessionName) > 64 { + roleSessionName = 
roleSessionName[0:64] + roleSessionNameWarning = "the role session name was truncated to 64 characters to fit within IAM session name length limits" + } + } assumeRoleInput := &sts.AssumeRoleInput{ - RoleSessionName: aws.String(username), + RoleSessionName: aws.String(roleSessionName), RoleArn: aws.String(roleArn), DurationSeconds: &lifeTimeInSeconds, } @@ -187,8 +196,9 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, "access_key": *tokenResp.Credentials.AccessKeyId, "secret_key": *tokenResp.Credentials.SecretAccessKey, "security_token": *tokenResp.Credentials.SessionToken, + "arn": *tokenResp.AssumedRoleUser.Arn, }, map[string]interface{}{ - "username": username, + "username": roleSessionName, "policy": roleArn, "is_sts": true, }) @@ -199,8 +209,8 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, // STS are purposefully short-lived and aren't renewable resp.Secret.Renewable = false - if usernameWarning != "" { - resp.AddWarning(usernameWarning) + if roleSessionNameWarning != "" { + resp.AddWarning(roleSessionNameWarning) } return resp, nil diff --git a/changelog/11345.txt b/changelog/11345.txt new file mode 100644 index 0000000000000..8ff694ff89f7e --- /dev/null +++ b/changelog/11345.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/aws: add ability to provide a role session name when generating STS credentials +``` diff --git a/website/content/api-docs/secret/aws.mdx b/website/content/api-docs/secret/aws.mdx index 890356f3903cd..64976776d9805 100644 --- a/website/content/api-docs/secret/aws.mdx +++ b/website/content/api-docs/secret/aws.mdx @@ -523,6 +523,10 @@ credentials retrieved through `/aws/creds` must be of the `iam_user` type. the Vault role is `assumed_role`. Must match one of the allowed role ARNs in the Vault role. Optional if the Vault role only allows a single AWS role ARN; required otherwise. +- `role_session_name` `(string)` - The role session name to attach to the assumed role ARN. + `role_session_name` is limited to 64 characters; if exceeded, the `role_session_name` in the + assumed role ARN will be truncated to 64 characters. If `role_session_name` is not provided, + then it will be generated dynamically by default. - `ttl` `(string: "3600s")` – Specifies the TTL for the use of the STS token. This is specified as a string with a duration suffix. Valid only when `credential_type` is `assumed_role` or `federation_token`. When not specified, @@ -550,7 +554,8 @@ $ curl \ "data": { "access_key": "AKIA...", "secret_key": "xlCs...", - "security_token": null + "security_token": null, + "arn": "arn:aws:sts::123456789012:assumed-role/DeveloperRole/some-user-supplied-role-session-name" } } ``` From 380d34395c5f33ef2100a394112791b8bfbb7358 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Mon, 17 May 2021 14:10:26 -0400 Subject: [PATCH 024/101] OSS parts of ent PR #1857: license autoloading init changes. (#11623) --- command/server_test.go | 2 +- vault/core.go | 10 ++++++++-- vault/testing.go | 35 +++++++++++++++++++---------------- vault/testing_util.go | 8 +++++--- 4 files changed, 33 insertions(+), 22 deletions(-) diff --git a/command/server_test.go b/command/server_test.go index 8d668310513b3..ba0f1d5f2b567 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -1,4 +1,4 @@ -// +build !race,!hsm +// +build !race,!hsm,!enterprise // NOTE: we can't use this with HSM. We can't set testing mode on and it's not // safe to use env vars since that provides an attack vector in the real world. 
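[Editor's aside, referring back to the `role_session_name` parameter documented in the aws.mdx hunk of PATCH 023 above: a hedged sketch of requesting STS credentials with an explicit session name through the Go API client. The mount path `aws/`, the role name `deploy`, and the session name are assumptions for illustration, and the sketch assumes the creds endpoint accepts a write; the parameter and response field names come from the patch:]

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Assumes an assumed_role-type Vault role named "deploy" on a mount at "aws/".
	// A role_session_name longer than 64 characters is truncated server-side and a
	// warning is attached to the response, per the patch.
	secret, err := client.Logical().Write("aws/creds/deploy", map[string]interface{}{
		"role_session_name": "ci-build-1234",
		"ttl":               "15m",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["access_key"], secret.Data["arn"])
}
```

The returned `arn` field reflects the supplied session name in the assumed-role ARN, which is the behaviour the updated docs example illustrates.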
diff --git a/vault/core.go b/vault/core.go index 770f0f8c31acd..8e0512d2447c0 100644 --- a/vault/core.go +++ b/vault/core.go @@ -120,6 +120,7 @@ var ( LastRemoteWAL = lastRemoteWALImpl LastRemoteUpstreamWAL = lastRemoteUpstreamWALImpl WaitUntilWALShipped = waitUntilWALShippedImpl + storedLicenseCheck = storedLicenseCheckImpl ) // NonFatalError is an error that can be returned during NewCore that should be @@ -651,8 +652,6 @@ type CoreConfig struct { License string LicensePath string LicensingConfig *LicensingConfig - // Don't set this unless in dev mode, ideally only when using inmem - DevLicenseDuration time.Duration DisablePerformanceStandby bool DisableIndexing bool @@ -923,6 +922,9 @@ func NewCore(conf *CoreConfig) (*Core, error) { return nil, fmt.Errorf("barrier setup failed: %w", err) } + if err := storedLicenseCheck(c, conf); err != nil { + return nil, err + } // We create the funcs here, then populate the given config with it so that // the caller can share state conf.ReloadFuncsLock = &c.reloadFuncsLock @@ -2862,3 +2864,7 @@ func ParseRequiredState(raw string, hmacKey []byte) (*logical.WALState, error) { ReplicatedIndex: replicatedIndex, }, nil } + +func storedLicenseCheckImpl(c *Core, conf *CoreConfig) error { + return nil +} diff --git a/vault/testing.go b/vault/testing.go index f9f0b21bdf117..855cb590696dc 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -783,10 +783,10 @@ type TestCluster struct { CleanupFunc func() SetupFunc func() - cleanupFuncs []func() - base *CoreConfig - pubKey interface{} - priKey interface{} + cleanupFuncs []func() + base *CoreConfig + LicensePublicKey ed25519.PublicKey + LicensePrivateKey ed25519.PrivateKey } func (c *TestCluster) Start() { @@ -1093,6 +1093,8 @@ type TestClusterOptions struct { CoreMetricSinkProvider func(clusterName string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper) PhysicalFactoryConfig map[string]interface{} + LicensePublicKey ed25519.PublicKey + LicensePrivateKey ed25519.PrivateKey } var DefaultNumCores = 3 @@ -1450,7 +1452,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfig.DevToken = base.DevToken coreConfig.EnableRaw = base.EnableRaw coreConfig.DisableSealWrap = base.DisableSealWrap - coreConfig.DevLicenseDuration = base.DevLicenseDuration coreConfig.DisableCache = base.DisableCache coreConfig.LicensingConfig = base.LicensingConfig coreConfig.DisablePerformanceStandby = base.DisablePerformanceStandby @@ -1557,12 +1558,14 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfig.HAPhysical = haPhys.(physical.HABackend) } - pubKey, priKey, err := testGenerateCoreKeys() - if err != nil { - t.Fatalf("err: %v", err) + if testCluster.LicensePublicKey == nil { + pubKey, priKey, err := testGenerateCoreKeys() + if err != nil { + t.Fatalf("err: %v", err) + } + testCluster.LicensePublicKey = pubKey + testCluster.LicensePrivateKey = priKey } - testCluster.pubKey = pubKey - testCluster.priKey = priKey if opts != nil && opts.InmemClusterLayers { if opts.ClusterLayers != nil { @@ -1581,7 +1584,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfigs := []*CoreConfig{} for i := 0; i < numCores; i++ { - cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], pubKey) + cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], testCluster.LicensePublicKey) testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup) 
cores = append(cores, c) @@ -1644,7 +1647,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // Extra Setup for _, tcc := range testCluster.Cores { - testExtraTestCoreSetup(t, priKey, tcc) + testExtraTestCoreSetup(t, testCluster.LicensePrivateKey, tcc) } // Cleanup @@ -1722,7 +1725,7 @@ func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOpt } // Create a new Core - cleanup, newCore, localConfig, coreHandler := cluster.newCore(t, idx, tcc.CoreConfig, opts, tcc.Listeners, cluster.pubKey) + cleanup, newCore, localConfig, coreHandler := cluster.newCore(t, idx, tcc.CoreConfig, opts, tcc.Listeners, cluster.LicensePublicKey) if coreHandler != nil { tcc.Handler = coreHandler tcc.Server.Handler = coreHandler @@ -1740,7 +1743,7 @@ func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOpt tcc.Client = cluster.getAPIClient(t, opts, tcc.Listeners[0].Address.Port, tcc.TLSConfig) testAdjustUnderlyingStorage(tcc) - testExtraTestCoreSetup(t, cluster.priKey, tcc) + testExtraTestCoreSetup(t, cluster.LicensePrivateKey, tcc) // Start listeners for _, ln := range tcc.Listeners { @@ -1751,7 +1754,7 @@ func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOpt tcc.Logger().Info("restarted test core", "core", idx) } -func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey interface{}) (func(), *Core, CoreConfig, http.Handler) { +func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey ed25519.PublicKey) (func(), *Core, CoreConfig, http.Handler) { localConfig := *coreConfig cleanupFunc := func() {} var handler http.Handler @@ -1818,7 +1821,7 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo switch { case localConfig.LicensingConfig != nil: if pubKey != nil { - localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey.(ed25519.PublicKey)) + localConfig.LicensingConfig.AdditionalPublicKeys = append(localConfig.LicensingConfig.AdditionalPublicKeys, pubKey) } default: localConfig.LicensingConfig = testGetLicensingConfig(pubKey) diff --git a/vault/testing_util.go b/vault/testing_util.go index 61584d7e06594..99eb631f4b0e7 100644 --- a/vault/testing_util.go +++ b/vault/testing_util.go @@ -3,12 +3,14 @@ package vault import ( + "crypto/ed25519" + testing "github.com/mitchellh/go-testing-interface" ) -func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil } -func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} } -func testExtraTestCoreSetup(testing.T, interface{}, *TestClusterCore) {} +func testGenerateCoreKeys() (ed25519.PublicKey, ed25519.PrivateKey, error) { return nil, nil, nil } +func testGetLicensingConfig(key ed25519.PublicKey) *LicensingConfig { return &LicensingConfig{} } +func testExtraTestCoreSetup(testing.T, ed25519.PrivateKey, *TestClusterCore) {} func testAdjustUnderlyingStorage(tcc *TestClusterCore) { tcc.UnderlyingStorage = tcc.physical } From c1e9469f2a92de2da3b33fc3f7b789a6fed565d7 Mon Sep 17 00:00:00 2001 From: Michael Golowka <72365+pcman312@users.noreply.github.com> Date: Mon, 17 May 2021 13:56:35 -0600 Subject: [PATCH 025/101] AWS Auth: Update error message to include underlying error (#11638) --- builtin/credential/aws/path_login.go | 2 +- changelog/11638.txt | 3 +++ 2 
files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog/11638.txt diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index 6aeaba6dcf68b..03c63f2e6a267 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -1364,7 +1364,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, if roleEntry.InferredEntityType == ec2EntityType { instance, err := b.validateInstance(ctx, req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", entity.SessionInfo, roleEntry.InferredAWSRegion)), nil + return logical.ErrorResponse("failed to verify %s as a valid EC2 instance in region %s: %s", entity.SessionInfo, roleEntry.InferredAWSRegion, err), nil } // build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements diff --git a/changelog/11638.txt b/changelog/11638.txt new file mode 100644 index 0000000000000..5ed50652b150f --- /dev/null +++ b/changelog/11638.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Underlying error included in validation failure message. +``` From aca54214d56b88bf0c8282d606d2d395b82dffeb Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Mon, 17 May 2021 16:35:52 -0400 Subject: [PATCH 026/101] Minor fix to the docs (#11489) --- website/content/api-docs/system/storage/raft.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/api-docs/system/storage/raft.mdx b/website/content/api-docs/system/storage/raft.mdx index 049b9aaf28d1f..5f2438c5f73b1 100644 --- a/website/content/api-docs/system/storage/raft.mdx +++ b/website/content/api-docs/system/storage/raft.mdx @@ -195,7 +195,7 @@ it. Unavailable if Raft is used exclusively for `ha_storage`. $ curl \ --header "X-Vault-Token: ..." \ --request POST \ - -- data-binary @raft.snap + --data-binary @raft.snap \ http://127.0.0.1:8200/v1/sys/storage/raft/snapshot ``` @@ -216,6 +216,6 @@ snapshot data. Unavailable if Raft is used exclusively for `ha_storage`. $ curl \ --header "X-Vault-Token: ..." 
\ --request POST \ - --data-binary @raft.snap + --data-binary @raft.snap \ http://127.0.0.1:8200/v1/sys/storage/raft/snapshot-force ``` From a79bbc6071682cdf0887a3dfdbe74a720ec5d8d8 Mon Sep 17 00:00:00 2001 From: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com> Date: Mon, 17 May 2021 16:41:39 -0500 Subject: [PATCH 027/101] UI/fix identity model (#11641) --- changelog/11641.txt | 3 +++ ui/app/models/identity/group.js | 4 ++-- .../components/database-connection.hbs | 7 +++--- .../components/identity/item-details.hbs | 24 +++++++++---------- .../components/identity/item-groups.hbs | 14 ++++++----- .../components/identity/item-members.hbs | 23 +++++++++++------- .../components/identity/item-metadata.hbs | 14 +++++------ .../identity/item-parent-groups.hbs | 7 +++--- .../components/identity/item-policies.hbs | 7 +++--- .../vault/cluster/access/identity/show.hbs | 2 +- .../access/identity/_shared-tests.js | 16 ++++++++++--- .../pages/components/identity/edit-form.js | 2 ++ 12 files changed, 73 insertions(+), 50 deletions(-) create mode 100644 changelog/11641.txt diff --git a/changelog/11641.txt b/changelog/11641.txt new file mode 100644 index 0000000000000..84bd31188beb3 --- /dev/null +++ b/changelog/11641.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix entity group membership and metadata not showing +``` diff --git a/ui/app/models/identity/group.js b/ui/app/models/identity/group.js index d51a96ef6af1b..31a0b69329286 100644 --- a/ui/app/models/identity/group.js +++ b/ui/app/models/identity/group.js @@ -64,8 +64,8 @@ export default IdentityModel.extend({ 'memberGroupIds.[]', function() { let { memberEntityIds, memberGroupIds } = this; - let numEntities = (memberEntityIds && memberEntityIds.get('length')) || 0; - let numGroups = (memberGroupIds && memberGroupIds.get('length')) || 0; + let numEntities = (memberEntityIds && memberEntityIds.length) || 0; + let numGroups = (memberGroupIds && memberGroupIds.length) || 0; return numEntities + numGroups > 0; } ), diff --git a/ui/app/templates/components/database-connection.hbs b/ui/app/templates/components/database-connection.hbs index 6384642525755..61a9683160dc5 100644 --- a/ui/app/templates/components/database-connection.hbs +++ b/ui/app/templates/components/database-connection.hbs @@ -87,7 +87,7 @@ {{form-field data-test-field attr=attr model=@model}} {{/if}} {{/each}} - + {{!-- Plugin Config Section --}}
@@ -148,7 +148,6 @@ /> {{else}} {{#each @model.statementFields as |attr|}} - {{log attr}} {{form-field data-test-field attr=attr model=@model}} {{/each}} {{/if}} @@ -295,11 +294,11 @@ {{#each @model.showAttrs as |attr|}} {{#let attr.options.defaultDisplay as |defaultDisplay|}} {{#if (eq attr.type "object")}} - {{else if (eq attr.type "array")}} - {{#if model.disabled}} + {{#if @model.disabled}} - {{#if model.canEdit}} - {{/if}} {{/if}} - - - - + + + +
- {{#each model.mergedEntityIds as |id|}} + {{#each @model.mergedEntityIds as |id|}}
{{id}}
{{/each}}
- -
- {{#if model.canEdit}} - + {{#if @model.canEdit}} + {{/if}}
{{/linked-block}} {{/each}} - {{#each model.memberEntityIds as |gid|}} + {{#each @model.memberEntityIds as |gid|}} {{#linked-block "vault.cluster.access.identity.show" "groups" @@ -32,14 +32,19 @@ }}
- + {{gid}} + />{{gid}} +
- {{#if model.canEdit}} - + {{#if @model.canEdit}} + {{/if}}
diff --git a/ui/app/templates/components/identity/item-metadata.hbs b/ui/app/templates/components/identity/item-metadata.hbs index 91873d83d9a61..495b8fc8b105d 100644 --- a/ui/app/templates/components/identity/item-metadata.hbs +++ b/ui/app/templates/components/identity/item-metadata.hbs @@ -1,4 +1,4 @@ -{{#each-in model.metadata as |key value|}} +{{#each-in @model.metadata as |key value|}}
@@ -10,19 +10,19 @@ {{value}}
- {{#if model.canEdit}} - + {{#if @model.canEdit}} + {{/if}}
{{else}} - - Edit {{lowercase (humanize model.identityType)}} + + Edit {{lowercase (humanize @model.identityType)}} Learn more diff --git a/ui/app/templates/components/identity/item-parent-groups.hbs b/ui/app/templates/components/identity/item-parent-groups.hbs index c0031c7c4888c..fabc4879e4cbf 100644 --- a/ui/app/templates/components/identity/item-parent-groups.hbs +++ b/ui/app/templates/components/identity/item-parent-groups.hbs @@ -1,5 +1,5 @@ -{{#if model.parentGroupIds.length}} - {{#each model.parentGroupIds as |gid|}} +{{#if @model.parentGroupIds.length}} + {{#each @model.parentGroupIds as |gid|}} {{#linked-block "vault.cluster.access.identity.show" "groups" @@ -9,7 +9,8 @@ }}
- + {{gid}} diff --git a/ui/app/templates/components/identity/item-policies.hbs b/ui/app/templates/components/identity/item-policies.hbs index 5d675588ed980..cab6df2a662f7 100644 --- a/ui/app/templates/components/identity/item-policies.hbs +++ b/ui/app/templates/components/identity/item-policies.hbs @@ -7,12 +7,13 @@ }}
- {{policyName}} + + {{policyName}}
- {{#if model.canEdit}} - + {{#if @model.canEdit}} + {{/if}}
diff --git a/ui/app/templates/vault/cluster/access/identity/show.hbs b/ui/app/templates/vault/cluster/access/identity/show.hbs index df09bf4173a26..9aa7a244ab371 100644 --- a/ui/app/templates/vault/cluster/access/identity/show.hbs +++ b/ui/app/templates/vault/cluster/access/identity/show.hbs @@ -21,7 +21,7 @@ diff --git a/ui/app/templates/vault/cluster/secrets/backend/versions.hbs b/ui/app/templates/vault/cluster/secrets/backend/versions.hbs index e245844a02f7a..9281ce6cedfee 100644 --- a/ui/app/templates/vault/cluster/secrets/backend/versions.hbs +++ b/ui/app/templates/vault/cluster/secrets/backend/versions.hbs @@ -1,11 +1,16 @@ - + model=model.engineId }} + @showCurrent={{true}} + />

@@ -17,52 +22,75 @@
-
- Version {{list.item.version}} - {{#if (eq list.item.version model.currentVersion)}} +
+ + Version {{list.item.version}} +
+ {{#if (eq list.item.version model.currentVersion)}} +
- Current + Current - {{/if}} -
-
- {{#if list.item.deleted}} +
+ {{/if}} + {{#if list.item.deleted}} +
- Deleted + Deleted - {{/if}} - {{#if list.item.destroyed}} +
+ {{/if}} + {{#if list.item.destroyed}} +
- Destroyed - - {{/if}} -
+ Destroyed + +
+ {{/if}}
- -
  • - - View version {{list.item.version}} - -
  • -
  • - - Create new version from {{list.item.version}} - -
  • -
    + + + + + + + +
    diff --git a/ui/tests/acceptance/secrets/backend/kv/secret-test.js b/ui/tests/acceptance/secrets/backend/kv/secret-test.js index 9fdde5a011f4d..a923d8e9d6d8d 100644 --- a/ui/tests/acceptance/secrets/backend/kv/secret-test.js +++ b/ui/tests/acceptance/secrets/backend/kv/secret-test.js @@ -144,7 +144,7 @@ module('Acceptance | secrets/secret/create', function(hooks) { .submit(); await listPage.create(); await editPage.createSecret(secretPath, 'foo', 'bar'); - await showPage.deleteSecret(); + await showPage.deleteSecretV1(); assert.equal( currentRouteName(), 'vault.cluster.secrets.backend.list-root', @@ -251,6 +251,78 @@ module('Acceptance | secrets/secret/create', function(hooks) { assert.ok(showPage.editIsPresent, 'shows the edit button'); }); + test('version 2 with policy with destroy capabilities shows modal', async function(assert) { + let backend = 'kv-v2'; + const V2_POLICY = ` + path "kv-v2/destroy/*" { + capabilities = ["update"] + } + path "kv-v2/metadata/*" { + capabilities = ["list", "update", "delete"] + } + path "kv-v2/data/secret" { + capabilities = ["create", "read", "update"] + } + `; + await consoleComponent.runCommands([ + `write sys/mounts/${backend} type=kv options=version=2`, + `write sys/policies/acl/kv-v2-degrade policy=${btoa(V2_POLICY)}`, + // delete any kv previously written here so that tests can be re-run + 'delete kv-v2/metadata/secret', + 'write -field=client_token auth/token/create policies=kv-v2-degrade', + ]); + + let userToken = consoleComponent.lastLogOutput; + await logout.visit(); + await authPage.login(userToken); + + await writeSecret(backend, 'secret', 'foo', 'bar'); + await click('[data-test-delete-open-modal]'); + await settled(); + assert.dom('[data-test-delete-modal="destroy-version"]').exists('destroy this version option shows'); + assert.dom('[data-test-delete-modal="destroy-all-versions"]').exists('destroy all versions option shows'); + assert.dom('[data-test-delete-modal="delete-version"]').doesNotExist('delete version does not show'); + }); + + test('version 2 with policy with only delete option does not show modal and undelete is an option', async function(assert) { + let backend = 'kv-v2'; + const V2_POLICY = ` + path "kv-v2/delete/*" { + capabilities = ["update"] + } + path "kv-v2/undelete/*" { + capabilities = ["update"] + } + path "kv-v2/metadata/*" { + capabilities = ["list","read","create","update"] + } + path "kv-v2/data/secret" { + capabilities = ["create", "read"] + } + `; + await consoleComponent.runCommands([ + `write sys/mounts/${backend} type=kv options=version=2`, + `write sys/policies/acl/kv-v2-degrade policy=${btoa(V2_POLICY)}`, + // delete any kv previously written here so that tests can be re-run + 'delete kv-v2/metadata/secret', + 'write -field=client_token auth/token/create policies=kv-v2-degrade', + ]); + + let userToken = consoleComponent.lastLogOutput; + await logout.visit(); + await authPage.login(userToken); + await writeSecret(backend, 'secret', 'foo', 'bar'); + assert.dom('[data-test-delete-open-modal]').doesNotExist('delete version does not show'); + assert.dom('[data-test-secret-v2-delete="true"]').exists('drop down delete shows'); + await showPage.deleteSecretV2(); + // unable to reload page in test scenario so going to list and back to secret to confirm deletion + let url = `/vault/secrets/${backend}/list`; + await visit(url); + await click('[data-test-secret-link="secret"]'); + assert.dom('[data-test-component="empty-state"]').exists('secret has been deleted'); + 
assert.dom('[data-test-secret-undelete]').exists('undelete button shows'); + }); + test('paths are properly encoded', async function(assert) { let backend = 'kv'; let paths = [ @@ -305,7 +377,7 @@ module('Acceptance | secrets/secret/create', function(hooks) { await listPage.create(); await editPage.createSecret(secretPath, 'foo', 'bar'); await settled(); - await click('[data-test-popup-menu-trigger="history"]'); + await click('[data-test-popup-menu-trigger="version"]'); await settled(); await click('[data-test-version-history]'); await settled(); diff --git a/ui/tests/pages/secrets/backend/kv/show.js b/ui/tests/pages/secrets/backend/kv/show.js index 7d1071a94cf8a..49caf8a50ae00 100644 --- a/ui/tests/pages/secrets/backend/kv/show.js +++ b/ui/tests/pages/secrets/backend/kv/show.js @@ -8,6 +8,8 @@ export default create({ text: text(), }), deleteBtn: clickable('[data-test-secret-delete] button'), + deleteBtnV1: clickable('[data-test-secret-v1-delete="true"] button'), + deleteBtnV2: clickable('[data-test-secret-v2-delete="true"] button'), confirmBtn: clickable('[data-test-confirm-button]'), rows: collection('data-test-row-label'), toggleJSON: clickable('[data-test-secret-json-toggle]'), @@ -22,4 +24,10 @@ export default create({ deleteSecret() { return this.deleteBtn().confirmBtn(); }, + deleteSecretV1() { + return this.deleteBtnV1().confirmBtn(); + }, + deleteSecretV2() { + return this.deleteBtnV2().confirmBtn(); + }, }); From 4f6f32642ac6bb118b8fb09af247991d46e88876 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Wed, 19 May 2021 10:03:32 -0700 Subject: [PATCH 033/101] Reload raft TLS keys on active startup (#11660) --- vault/raft.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vault/raft.go b/vault/raft.go index ab7932005982a..88d9c684d16db 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -185,6 +185,12 @@ func (c *Core) setupRaftActiveNode(ctx context.Context) error { raftBackend.SetupAutopilot(c.activeContext, autopilotConfig, c.raftFollowerStates, disableAutopilot) c.pendingRaftPeers = &sync.Map{} + + // Reload the raft TLS keys to ensure we are using the latest version. 
+ if err := c.checkRaftTLSKeyUpgrades(ctx); err != nil { + return err + } + return c.startPeriodicRaftTLSRotate(ctx) } From d2c4fd4102411bc117166200843518c3fa51060c Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Wed, 19 May 2021 14:06:08 -0400 Subject: [PATCH 034/101] Tokenutil: Perform num uses check earlier (#11647) * Perform num uses check earlier * Add CL * Ensure that login works --- builtin/credential/approle/backend_test.go | 69 +++++++++++++++++++ changelog/11647.txt | 3 + sdk/helper/tokenutil/tokenutil.go | 14 ++-- .../vault/sdk/helper/tokenutil/tokenutil.go | 14 ++-- 4 files changed, 86 insertions(+), 14 deletions(-) create mode 100644 changelog/11647.txt diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go index a23f4c3642259..044f02d2a2a4c 100644 --- a/builtin/credential/approle/backend_test.go +++ b/builtin/credential/approle/backend_test.go @@ -5,10 +5,13 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/hashicorp/vault/sdk/logical" ) func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { + t.Helper() config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -26,6 +29,72 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { return b, config.StorageView } +func TestAppRole_RoleServiceToBatchNumUses(t *testing.T) { + b, s := createBackendWithStorage(t) + + requestFunc := func(operation logical.Operation, data map[string]interface{}) { + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/testrole", + Operation: operation, + Storage: s, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %#v\nresp: %#v", err, resp) + } + } + + data := map[string]interface{}{ + "bind_secret_id": true, + "secret_id_num_uses": 0, + "secret_id_ttl": "10m", + "token_policies": "policy", + "token_ttl": "5m", + "token_max_ttl": "10m", + "token_num_uses": 2, + "token_type": "default", + } + requestFunc(logical.CreateOperation, data) + + data["token_num_uses"] = 0 + data["token_type"] = "batch" + requestFunc(logical.UpdateOperation, data) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/testrole/role-id", + Operation: logical.ReadOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + roleID := resp.Data["role_id"] + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/testrole/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID := resp.Data["secret_id"] + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + require.NotNil(t, resp.Auth) +} + func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { testFunc := func(t *testing.T, roleName string) { var resp *logical.Response diff --git a/changelog/11647.txt b/changelog/11647.txt new file mode 100644 index 0000000000000..2075989ef7c31 --- /dev/null +++ b/changelog/11647.txt @@ -0,0 +1,3 @@ +```release-note:bug +tokenutil: Perform the num uses check before token type. 
+``` diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go index 29f9748344524..19a3f73c51899 100644 --- a/sdk/helper/tokenutil/tokenutil.go +++ b/sdk/helper/tokenutil/tokenutil.go @@ -207,6 +207,13 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD t.TokenType = tokenType } + if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { + t.TokenNumUses = tokenNumUses.(int) + } + if t.TokenNumUses < 0 { + return errors.New("'token_num_uses' cannot be negative") + } + if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch { if t.TokenPeriod != 0 { return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens") @@ -226,13 +233,6 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'") } - if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { - t.TokenNumUses = tokenNumUses.(int) - } - if t.TokenNumUses < 0 { - return errors.New("'token_num_uses' cannot be negative") - } - return nil } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go index 29f9748344524..19a3f73c51899 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go @@ -207,6 +207,13 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD t.TokenType = tokenType } + if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { + t.TokenNumUses = tokenNumUses.(int) + } + if t.TokenNumUses < 0 { + return errors.New("'token_num_uses' cannot be negative") + } + if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch { if t.TokenPeriod != 0 { return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens") @@ -226,13 +233,6 @@ func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldD return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'") } - if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { - t.TokenNumUses = tokenNumUses.(int) - } - if t.TokenNumUses < 0 { - return errors.New("'token_num_uses' cannot be negative") - } - return nil } From 12fa94011d46e18625985c9eb43e51885bc0d72b Mon Sep 17 00:00:00 2001 From: Marc Falzon Date: Wed, 19 May 2021 20:35:19 +0200 Subject: [PATCH 035/101] Add Exoscale plugins to the list of partner plugins (#11592) This change adds the Exoscale auth/secrets plugins to the list of partner plugins on the website "Plugin Portal" page. --- website/content/docs/plugin-portal.mdx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/content/docs/plugin-portal.mdx b/website/content/docs/plugin-portal.mdx index b02823afd8378..a12c469aa5f83 100644 --- a/website/content/docs/plugin-portal.mdx +++ b/website/content/docs/plugin-portal.mdx @@ -96,6 +96,10 @@ exists within the Vault repository, the plugin can be built as instructed in Partner plugins are developed by HashiCorp partners and are owned and maintained by the technology partner. HashiCorp has verified the authenticity of the partner’s plugin, and that the partner is a member of the [HashiCorp Technology Partner Program](https://www.hashicorp.com/partners/become-a-partner#technology). Compatibility and stability guarantees on these plugins are established by their authors. 
+### Auth + +- [Exoscale](https://github.com/exoscale/vault-plugin-auth-exoscale) + ### Database - [Aerospike](https://github.com/aerospike-community/vault-plugin-database-aerospike) @@ -103,6 +107,7 @@ Partner plugins are developed by HashiCorp partners and are owned and maintained ### Secrets +- [Exoscale](https://github.com/exoscale/vault-plugin-secrets-exoscale) - [PrimeKey EJBCA](https://github.com/primekeydevs/ejbca-vault-plugin) - [Venafi](https://github.com/Venafi/vault-pki-backend-venafi) From 9162552dc6c3ed2a1b9a0e424301dfb2e8e100b2 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Wed, 19 May 2021 12:01:52 -0700 Subject: [PATCH 036/101] Make sure we are checking the final peerset (#11664) --- vault/external_tests/raft/raft_autopilot_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vault/external_tests/raft/raft_autopilot_test.go b/vault/external_tests/raft/raft_autopilot_test.go index 6e598fbdb0b4c..74a5df484591e 100644 --- a/vault/external_tests/raft/raft_autopilot_test.go +++ b/vault/external_tests/raft/raft_autopilot_test.go @@ -380,7 +380,7 @@ func TestRaft_AutoPilot_Peersets_Equivalent(t *testing.T) { t.Fatal(err) } - if reflect.DeepEqual(core0Peers, core1Peers) && reflect.DeepEqual(core1Peers, core2Peers) { + if len(core0Peers) == 3 && reflect.DeepEqual(core0Peers, core1Peers) && reflect.DeepEqual(core1Peers, core2Peers) { break } time.Sleep(time.Second) From 885320f60c41c15ddb609f6b25691fe2e2f2a9c1 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Wed, 19 May 2021 16:07:58 -0400 Subject: [PATCH 037/101] VAULT-2439: OSS parts of #1889 (raft licensing init) (#11665) --- vault/core.go | 8 +++----- vault/init.go | 9 +++++++-- vault/logical_system_raft.go | 5 +++-- vault/raft.go | 10 ++++++++-- vault/testing.go | 11 +++++++++-- 5 files changed, 30 insertions(+), 13 deletions(-) diff --git a/vault/core.go b/vault/core.go index 8e0512d2447c0..a9b74bda5e886 100644 --- a/vault/core.go +++ b/vault/core.go @@ -120,7 +120,9 @@ var ( LastRemoteWAL = lastRemoteWALImpl LastRemoteUpstreamWAL = lastRemoteUpstreamWALImpl WaitUntilWALShipped = waitUntilWALShippedImpl - storedLicenseCheck = storedLicenseCheckImpl + storedLicenseCheck = func(c *Core, conf *CoreConfig) error { return nil } + LicenseAutoloaded = func(*Core) bool { return false } + LicenseInitCheck = func(*Core) error { return nil } ) // NonFatalError is an error that can be returned during NewCore that should be @@ -2864,7 +2866,3 @@ func ParseRequiredState(raw string, hmacKey []byte) (*logical.WALState, error) { ReplicatedIndex: replicatedIndex, }, nil } - -func storedLicenseCheckImpl(c *Core, conf *CoreConfig) error { - return nil -} diff --git a/vault/init.go b/vault/init.go index 64f1c6a9b1689..24ea18120d0b3 100644 --- a/vault/init.go +++ b/vault/init.go @@ -39,8 +39,9 @@ type InitResult struct { } var ( - initPTFunc = func(c *Core) func() { return nil } - initInProgress uint32 + initPTFunc = func(c *Core) func() { return nil } + initInProgress uint32 + ErrInitWithoutAutoloading = errors.New("cannot initialize storage without an autoloaded license") ) func (c *Core) InitializeRecovery(ctx context.Context) error { @@ -159,6 +160,10 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) { // Initialize is used to initialize the Vault with the given // configurations. 
func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitResult, error) { + if err := LicenseInitCheck(c); err != nil { + return nil, err + } + atomic.StoreUint32(&initInProgress, 1) defer atomic.StoreUint32(&initInProgress, 0) barrierConfig := initParams.BarrierConfig diff --git a/vault/logical_system_raft.go b/vault/logical_system_raft.go index 22ed1da997b93..f4a2df91d9510 100644 --- a/vault/logical_system_raft.go +++ b/vault/logical_system_raft.go @@ -374,8 +374,9 @@ func (b *SystemBackend) handleRaftBootstrapAnswerWrite() framework.OperationFunc return &logical.Response{ Data: map[string]interface{}{ - "peers": peers, - "tls_keyring": &keyring, + "peers": peers, + "tls_keyring": &keyring, + "autoloaded_license": LicenseAutoloaded(b.Core), }, }, nil } diff --git a/vault/raft.go b/vault/raft.go index 88d9c684d16db..b6e20fd388072 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -37,6 +37,8 @@ var ( // TestingUpdateClusterAddr is used in tests to override the cluster address TestingUpdateClusterAddr uint32 + + ErrJoinWithoutAutoloading = errors.New("attempt to join a cluster using autoloaded licenses while not using autoloading ourself") ) // GetRaftNodeID returns the raft node ID if there is one, or an empty string if there's not @@ -1107,6 +1109,9 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access, return err } + if answerResp.Data.AutoloadedLicense && !LicenseAutoloaded(c) { + return ErrJoinWithoutAutoloading + } if err := raftBackend.Bootstrap(answerResp.Data.Peers); err != nil { return err } @@ -1207,8 +1212,9 @@ type answerRespData struct { } type answerResp struct { - Peers []raft.Peer `json:"peers"` - TLSKeyring *raft.TLSKeyring `json:"tls_keyring"` + Peers []raft.Peer `json:"peers"` + TLSKeyring *raft.TLSKeyring `json:"tls_keyring"` + AutoloadedLicense bool `json:"autoloaded_license"` } func newDiscover() (*discover.Discover, error) { diff --git a/vault/testing.go b/vault/testing.go index 855cb590696dc..d7ecdafe1af25 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -866,7 +866,13 @@ func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error { } func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { - t.Helper() + err := c.AttemptUnsealCore(core) + if err != nil { + t.Fatal(err) + } +} + +func (c *TestCluster) AttemptUnsealCore(core *TestClusterCore) error { var keys [][]byte if core.seal.RecoveryKeySupported() { keys = c.RecoveryKeys @@ -875,9 +881,10 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) { } for _, key := range keys { if _, err := core.Core.Unseal(TestKeyCopy(key)); err != nil { - t.Fatalf("unseal err: %s", err) + return fmt.Errorf("unseal err: %w", err) } } + return nil } func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) { From 62a34675f6b24ab96a7695862a4fab45dd476d18 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Thu, 20 May 2021 13:05:26 -0400 Subject: [PATCH 038/101] Pin aerospike container image to a known working tag. 
(#11677) --- physical/aerospike/aerospike_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go index 1845248d3ae0f..610cf6526fc60 100644 --- a/physical/aerospike/aerospike_test.go +++ b/physical/aerospike/aerospike_test.go @@ -43,7 +43,7 @@ func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) { runner, err := docker.NewServiceRunner(docker.RunOptions{ ImageRepo: "aerospike/aerospike-server", ContainerName: "aerospikedb", - ImageTag: "latest", + ImageTag: "5.5.0.10", Ports: []string{"3000/tcp", "3001/tcp", "3002/tcp", "3003/tcp"}, }) if err != nil { From 44410284716da7b31cb3c774d4b908bb1c1603c5 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Thu, 20 May 2021 13:32:15 -0400 Subject: [PATCH 039/101] OSS parts of #1891 (sys/health license addition) (#11676) --- http/sys_health.go | 44 ++++++++++++++++++++++++++++++----------- http/sys_health_test.go | 6 ++++++ vault/core.go | 7 +++++++ 3 files changed, 46 insertions(+), 11 deletions(-) diff --git a/http/sys_health.go b/http/sys_health.go index 145deaf53adb1..8ab7359e231fd 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -195,6 +195,21 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro ClusterID: clusterID, } + licenseState, err := vault.LicenseSummary(core) + if err != nil { + return http.StatusInternalServerError, nil, err + } + + if licenseState != nil { + body.License = &HealthResponseLicense{ + State: licenseState.State, + Terminated: licenseState.Terminated, + } + if !licenseState.ExpiryTime.IsZero() { + body.License.ExpiryTime = licenseState.ExpiryTime.Format(time.RFC3339) + } + } + if init && !sealed && !standby { body.LastWAL = vault.LastWAL(core) } @@ -202,16 +217,23 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro return code, body, nil } +type HealthResponseLicense struct { + State string `json:"state"` + ExpiryTime string `json:"expiry_time"` + Terminated bool `json:"terminated"` +} + type HealthResponse struct { - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - Standby bool `json:"standby"` - PerformanceStandby bool `json:"performance_standby"` - ReplicationPerformanceMode string `json:"replication_performance_mode"` - ReplicationDRMode string `json:"replication_dr_mode"` - ServerTimeUTC int64 `json:"server_time_utc"` - Version string `json:"version"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - LastWAL uint64 `json:"last_wal,omitempty"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` + License *HealthResponseLicense `json:"license,omitempty"` } diff --git a/http/sys_health_test.go b/http/sys_health_test.go index 8cf373d94ab06..68ef11b9e2f57 100644 --- a/http/sys_health_test.go +++ b/http/sys_health_test.go @@ -44,6 +44,7 @@ func TestSysHealth_get(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { 
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } @@ -77,6 +78,7 @@ func TestSysHealth_get(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } @@ -114,6 +116,7 @@ func TestSysHealth_get(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } @@ -157,6 +160,7 @@ func TestSysHealth_customcodes(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } @@ -191,6 +195,7 @@ func TestSysHealth_customcodes(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } @@ -228,6 +233,7 @@ func TestSysHealth_customcodes(t *testing.T) { } else { expected["cluster_id"] = actual["cluster_id"] } + delete(actual, "license") if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } diff --git a/vault/core.go b/vault/core.go index a9b74bda5e886..8162d4535a6df 100644 --- a/vault/core.go +++ b/vault/core.go @@ -123,6 +123,7 @@ var ( storedLicenseCheck = func(c *Core, conf *CoreConfig) error { return nil } LicenseAutoloaded = func(*Core) bool { return false } LicenseInitCheck = func(*Core) error { return nil } + LicenseSummary = func(*Core) (*LicenseState, error) { return nil, nil } ) // NonFatalError is an error that can be returned during NewCore that should be @@ -2866,3 +2867,9 @@ func ParseRequiredState(raw string, hmacKey []byte) (*logical.WALState, error) { ReplicatedIndex: replicatedIndex, }, nil } + +type LicenseState struct { + State string + ExpiryTime time.Time + Terminated bool +} From ea7b63a348c73bf958c9be83ac5e242aec6acac9 Mon Sep 17 00:00:00 2001 From: Meggie Date: Thu, 20 May 2021 13:37:49 -0400 Subject: [PATCH 040/101] changelog++ --- CHANGELOG.md | 170 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8f362bb49d3d..964b7ac23bf39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,127 @@ ## 1.8.0 (Unreleased) +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] + +FEATURES: + +* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532 | MySQL Database UI](https://github.com/hashicorp/vault/pull/11532 | MySQL Database UI)] +* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)] +* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] +* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] + +IMPROVEMENTS: + +* auth/aws: Underlying error included in validation failure message. [[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. 
[[GH-11588](https://github.com/hashicorp/vault/pull/11588)] +* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] +* core: Add metrics to report if a node is a perf standby, if a node is a dr +secondary or primary, and if a node is a perf secondary or primary. Also allow +DR secondaries to serve metrics requests when using unauthenticated_metrics_access. [[GH-1844](https://github.com/hashicorp/vault/pull/1844)] +* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* replication (enterprise): The log shipper is now memory +as well as length bound, and length and size can be +separately configured. +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] +* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] +* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] +* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] +* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] +* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)] +* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)] +* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)] + +BUG FIXES: + +* agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes. +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* core: Fix edge cases in the configuration endpoint for barrier key autorotation. 
[[GH-11541](https://github.com/hashicorp/vault/pull/11541)] +* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] +* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)] +* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] +* replication: Fix: mounts created within a namespace that was part of an Allow +filtering rule would not appear on performance secondary if created after rule +was defined. [[GH-1807](https://github.com/hashicorp/vault/pull/1807)] +* secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3 +* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] +* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] +* transform (enterprise): Fix an issue with malformed transform configuration +storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. 
+* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] +* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] +* ui: Fix footer URL linking to the correct version changelog. [[GH-11283](https://github.com/hashicorp/vault/pull/11283)] +* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] +* ui: Fix status menu no showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] +* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)] +* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] + +## 1.7.2 +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] + +IMPROVEMENTS: + +* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] +* auth/aws: Underlying error included in validation failure message. [[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] + +BUG FIXES: + +* agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. 
[[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] + ## 1.7.1 ### 21 April 2021 @@ -184,6 +306,32 @@ the given key will be used to encrypt the snapshot using AWS KMS. DEPRECATIONS: * aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated. Refer to the CHANGES section for additional details. + +## 1.6.5 +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)] + +BUG FIXES: + +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] ## 1.6.4 ### 21 April 2021 @@ -415,6 +563,28 @@ BUG FIXES: * ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)] * core: Fix bug where updating an existing path quota could introduce a conflict. 
[[GH-10285](https://github.com/hashicorp/vault/pull/10285)] +## 1.5.9 +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for +signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)] + +BUG FIXES: + +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] + ## 1.5.8 ### 21 April 2021 From 929eecf00eaded52fed8f7cbead5dd6b399f1303 Mon Sep 17 00:00:00 2001 From: Meggie Date: Thu, 20 May 2021 16:05:38 -0400 Subject: [PATCH 041/101] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 964b7ac23bf39..a779f02094a4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ secondary or primary, and if a node is a perf secondary or primary. Also allow DR secondaries to serve metrics requests when using unauthenticated_metrics_access. [[GH-1844](https://github.com/hashicorp/vault/pull/1844)] * core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] * core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] * replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured. @@ -101,6 +102,7 @@ IMPROVEMENTS: * api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] * auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] * secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] * secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] * secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] From bea56190e68cee0c0d8d675d6e55a5f478afc83d Mon Sep 17 00:00:00 2001 From: Theron Voran Date: Thu, 20 May 2021 14:37:49 -0700 Subject: [PATCH 042/101] docs: small fixes in k8s docs (#11679) indentation and spelling --- .../docs/platform/k8s/helm/configuration.mdx | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/website/content/docs/platform/k8s/helm/configuration.mdx b/website/content/docs/platform/k8s/helm/configuration.mdx index ac3ad1bb71483..a978864240ddc 100644 --- a/website/content/docs/platform/k8s/helm/configuration.mdx +++ b/website/content/docs/platform/k8s/helm/configuration.mdx @@ -83,7 +83,7 @@ and consider if they're appropriate for your deployment. - `tag` (`string: "1.7.0"`) - The tag of the Vault Docker image to use for the Vault Agent Sidecar. **Vault 1.3.1+ is required by the admission controller**. -- `agentDefaults` - Values that configure the injected Vault Agent containers default values. + - `agentDefaults` - Values that configure the injected Vault Agent containers default values. - `cpuLimit` (`string: "500m"`) - The default CPU limit for injected Vault Agent containers. @@ -872,33 +872,33 @@ and consider if they're appropriate for your deployment. - `pod` - Values that configure the Vault CSI Provider pod. - - `annotations` (`dictionary: {}`) - This value defines additional annotations to - add to the Vault CSI Provider pods. This can either be YAML or a YAML-formatted - multi-line templated string. + - `annotations` (`dictionary: {}`) - This value defines additional annotations to + add to the Vault CSI Provider pods. This can either be YAML or a YAML-formatted + multi-line templated string. - ```yaml - annotations: - foo: bar - # or - annotations: | - foo: bar - ``` + ```yaml + annotations: + foo: bar + # or + annotations: | + foo: bar + ``` - `serviceAccount` - Values that configure the Vault CSI Provider's serviceaccount. - - `annotations` (`dictionary: {}`) - This value defines additional annotations to - add to the Vault CSI Provider pods. This can either be YAML or a YAML-formatted - multi-line templated string. + - `annotations` (`dictionary: {}`) - This value defines additional + annotations for the serviceAccount definition. This can either be YAML or + a YAML-formatted multi-line templated string. - ```yaml - annotations: - foo: bar - # or - annotations: | - foo: bar - ``` + ```yaml + annotations: + foo: bar + # or + annotations: | + foo: bar + ``` - - `readinessProbe` - Values that configure the readiness probe for the Vault CSI Proivder pods. + - `readinessProbe` - Values that configure the readiness probe for the Vault CSI Provider pods. - `failureThreshold` (`int: 2`) - When set to a value, configures how many probe failures will be tolerated by Kubernetes. 
@@ -910,7 +910,7 @@ and consider if they're appropriate for your deployment. - `timeoutSeconds` (`int: 3`) - When set to a value, configures the number of seconds after which the probe times out. - - `livenessProbe` - Values that configure the liveliness probe for the Vault CSI Proivder pods. + - `livenessProbe` - Values that configure the liveliness probe for the Vault CSI Provider pods. - `initialDelaySeconds` (`int: 5`) - Sets the initial delay of the liveliness probe when the container starts. From ee13145436328ff8fd6d2ee6a168dbf93f023b25 Mon Sep 17 00:00:00 2001 From: Lars Lehtonen Date: Fri, 21 May 2021 07:22:29 -0700 Subject: [PATCH 043/101] plugins: deprecate errwrap.Wrapf() (#11590) * plugins/database/redshift: deprecate errwrap.Wrapf() * plugins/database/postgresql: deprecate errwrap.Wrapf() * plugins/database/mysql: deprecate errwrap.Wrapf() * plugins/database/mssql: deprecate errwrap.Wrapf() * plugins/database/mongodb: deprecate errwrap.Wrapf() * plugins/database/influxdb: deprecate errwrap.Wrapf() --- .../database/influxdb/connection_producer.go | 23 +++++++++---------- plugins/database/influxdb/influxdb_test.go | 13 +++++------ .../database/mongodb/connection_producer.go | 3 +-- plugins/database/mssql/mssql.go | 5 ++-- plugins/database/mysql/connection_producer.go | 7 +++--- plugins/database/postgresql/postgresql.go | 5 ++-- plugins/database/redshift/redshift.go | 5 ++-- 7 files changed, 27 insertions(+), 34 deletions(-) diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index b1d3ea5598387..ee22964807c1f 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "github.com/hashicorp/errwrap" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -62,7 +61,7 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi } i.connectTimeout, err = parseutil.ParseDurationSecond(i.ConnectTimeoutRaw) if err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("invalid connect_timeout: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid connect_timeout: %w", err) } switch { @@ -80,11 +79,11 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi case len(i.PemJSON) != 0: parsedCertBundle, err = certutil.ParsePKIJSON([]byte(i.PemJSON)) if err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %w", err) } certBundle, err = parsedCertBundle.ToCertBundle() if err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error marshaling PEM information: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("Error marshaling PEM information: %w", err) } i.certificate = certBundle.Certificate i.privateKey = certBundle.PrivateKey @@ -94,11 +93,11 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi case len(i.PemBundle) != 0: parsedCertBundle, err = certutil.ParsePEMBundle(i.PemBundle) if err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error parsing the given 
PEM information: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("Error parsing the given PEM information: %w", err) } certBundle, err = parsedCertBundle.ToCertBundle() if err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("Error marshaling PEM information: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("Error marshaling PEM information: %w", err) } i.certificate = certBundle.Certificate i.privateKey = certBundle.PrivateKey @@ -112,7 +111,7 @@ func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugi if req.VerifyConnection { if _, err := i.Connection(ctx); err != nil { - return dbplugin.InitializeResponse{}, errwrap.Wrapf("error verifying connection: {{err}}", err) + return dbplugin.InitializeResponse{}, fmt.Errorf("error verifying connection: %w", err) } } @@ -185,12 +184,12 @@ func (i *influxdbConnectionProducer) createClient() (influx.Client, error) { parsedCertBundle, err := certBundle.ToParsedCertBundle() if err != nil { - return nil, errwrap.Wrapf("failed to parse certificate bundle: {{err}}", err) + return nil, fmt.Errorf("failed to parse certificate bundle: %w", err) } tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient) if err != nil || tlsConfig == nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to get TLS configuration: tlsConfig:%#v err:{{err}}", tlsConfig), err) + return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%w", tlsConfig, err) } } @@ -214,19 +213,19 @@ func (i *influxdbConnectionProducer) createClient() (influx.Client, error) { cli, err := influx.NewHTTPClient(clientConfig) if err != nil { - return nil, errwrap.Wrapf("error creating client: {{err}}", err) + return nil, fmt.Errorf("error creating client: %w", err) } // Checking server status _, _, err = cli.Ping(i.connectTimeout) if err != nil { - return nil, errwrap.Wrapf("error checking cluster status: {{err}}", err) + return nil, fmt.Errorf("error checking cluster status: %w", err) } // verifying infos about the connection isAdmin, err := isUserAdmin(cli, i.Username) if err != nil { - return nil, errwrap.Wrapf("error getting if provided username is admin: {{err}}", err) + return nil, fmt.Errorf("error getting if provided username is admin: %w", err) } if !isAdmin { return nil, fmt.Errorf("the provided user is not an admin of the influxDB server") diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go index 8669961741274..5328996927d40 100644 --- a/plugins/database/influxdb/influxdb_test.go +++ b/plugins/database/influxdb/influxdb_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/testhelpers/docker" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" @@ -78,12 +77,12 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { }) cli, err := influx.NewHTTPClient(c.apiConfig()) if err != nil { - return nil, errwrap.Wrapf("error creating InfluxDB client: {{err}}", err) + return nil, fmt.Errorf("error creating InfluxDB client: %w", err) } defer cli.Close() _, _, err = cli.Ping(1) if err != nil { - return nil, errwrap.Wrapf("error checking cluster status: {{err}}", err) + return nil, fmt.Errorf("error checking cluster status: %w", err) } return c, nil @@ -421,20 +420,20 @@ func testCredsExist(address, username, password string) error { } cli, err := influx.NewHTTPClient(conf) if err != nil { - 
return errwrap.Wrapf("Error creating InfluxDB Client: ", err) + return fmt.Errorf("Error creating InfluxDB Client: %w", err) } defer cli.Close() _, _, err = cli.Ping(1) if err != nil { - return errwrap.Wrapf("error checking server ping: {{err}}", err) + return fmt.Errorf("error checking server ping: %w", err) } q := influx.NewQuery("SHOW SERIES ON vault", "", "") response, err := cli.Query(q) if err != nil { - return errwrap.Wrapf("error querying influxdb server: {{err}}", err) + return fmt.Errorf("error querying influxdb server: %w", err) } if response != nil && response.Error() != nil { - return errwrap.Wrapf("error using the correct influx database: {{err}}", response.Error()) + return fmt.Errorf("error using the correct influx database: %w", response.Error()) } return nil } diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index 1f0c312fa9766..348fb6bd4d435 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/mitchellh/mapstructure" @@ -193,7 +192,7 @@ func (c *mongoDBConnectionProducer) getWriteConcern() (opts *options.ClientOptio concern := &writeConcern{} err = json.Unmarshal([]byte(input), concern) if err != nil { - return nil, errwrap.Wrapf("error unmarshalling write_concern: {{err}}", err) + return nil, fmt.Errorf("error unmarshalling write_concern: %w", err) } // Translate write concern to mongo options diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index bfed5fee1fd90..feb4385b0a9ca 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -8,7 +8,6 @@ import ( "strings" _ "github.com/denisenkom/go-mssqldb" - "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" @@ -279,10 +278,10 @@ func (m *MSSQL) revokeUserDefault(ctx context.Context, username string) error { // can't drop if not all database users are dropped if rows.Err() != nil { - return errwrap.Wrapf("could not generate sql statements for all rows: {{err}}", rows.Err()) + return fmt.Errorf("could not generate sql statements for all rows: %w", rows.Err()) } if lastStmtError != nil { - return errwrap.Wrapf("could not perform all sql statements: {{err}}", lastStmtError) + return fmt.Errorf("could not perform all sql statements: %w", lastStmtError) } // Drop this login diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go index 014eeea3ffa5b..480719a0834b7 100644 --- a/plugins/database/mysql/connection_producer.go +++ b/plugins/database/mysql/connection_producer.go @@ -11,7 +11,6 @@ import ( "time" "github.com/go-sql-driver/mysql" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -88,7 +87,7 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw) if err != nil { - return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err) + return nil, fmt.Errorf("invalid max_connection_lifetime: %w", err) } 
tlsConfig, err := c.getTLSAuth() @@ -113,11 +112,11 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte if verifyConnection { if _, err := c.Connection(ctx); err != nil { - return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + return nil, fmt.Errorf("error verifying connection: %w", err) } if err := c.db.PingContext(ctx); err != nil { - return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + return nil, fmt.Errorf("error verifying connection: %w", err) } } diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index fcba13249300f..ac0219b948bbb 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -7,7 +7,6 @@ import ( "regexp" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" @@ -445,10 +444,10 @@ func (p *PostgreSQL) defaultDeleteUser(ctx context.Context, username string) err // can't drop if not all privileges are revoked if rows.Err() != nil { - return errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err()) + return fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err()) } if lastStmtError != nil { - return errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError) + return fmt.Errorf("could not perform all revocation statements: %w", lastStmtError) } // Drop this user diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go index e8262e7cbbe2f..6cd96bf5df8e1 100644 --- a/plugins/database/redshift/redshift.go +++ b/plugins/database/redshift/redshift.go @@ -7,7 +7,6 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" @@ -446,10 +445,10 @@ $$;`) // can't drop if not all privileges are revoked if rows.Err() != nil { - return dbplugin.DeleteUserResponse{}, errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err()) + return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err()) } if lastStmtError != nil { - return dbplugin.DeleteUserResponse{}, errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError) + return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not perform all revocation statements: %w", lastStmtError) } // Drop this user From 3c282ccaea9f42b78de1977eef0857a2ab032391 Mon Sep 17 00:00:00 2001 From: Jim Kalafut Date: Fri, 21 May 2021 16:16:34 -0700 Subject: [PATCH 044/101] Update website version (#11685) --- website/data/version.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/data/version.js b/website/data/version.js index 26b222fc7c6a2..2e5870ebdf6bb 100644 --- a/website/data/version.js +++ b/website/data/version.js @@ -1,6 +1,6 @@ -export const VERSION = '1.7.1' +export const VERSION = '1.7.2' export const CHANGELOG_URL = - 'https://github.com/hashicorp/vault/blob/master/CHANGELOG.md#171' + 'https://github.com/hashicorp/vault/blob/master/CHANGELOG.md#172' // HashiCorp officially supported package managers export const packageManagers = [ From 6d6ea1840a11fa9f921b49282c1bac30f3715f97 Mon Sep 17 00:00:00 2001 From: Jeff Escalante Date: 
Fri, 21 May 2021 19:23:49 -0400 Subject: [PATCH 045/101] update downloads page component to fix a bug in ent version filtering (#11686) --- website/package-lock.json | 1486 +++++++++++++++++-------------------- website/package.json | 26 +- 2 files changed, 713 insertions(+), 799 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 97deee1ad792e..359758e008214 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -8,158 +8,158 @@ "name": "vault-docs", "version": "1.0.0", "dependencies": { - "@hashicorp/mktg-global-styles": "3.0.1", - "@hashicorp/nextjs-scripts": "18.1.0", + "@hashicorp/mktg-global-styles": "3.0.2", + "@hashicorp/nextjs-scripts": "18.3.1", "@hashicorp/react-alert-banner": "6.1.1", "@hashicorp/react-button": "5.0.1", - "@hashicorp/react-case-study-slider": "6.0.0", + "@hashicorp/react-case-study-slider": "6.0.2", "@hashicorp/react-code-block": "4.0.1", "@hashicorp/react-content": "7.0.1", - "@hashicorp/react-docs-page": "13.2.0", - "@hashicorp/react-featured-slider": "4.0.0", + "@hashicorp/react-docs-page": "13.3.1", + "@hashicorp/react-featured-slider": "4.0.2", "@hashicorp/react-hashi-stack-menu": "2.0.3", "@hashicorp/react-head": "3.0.2", - "@hashicorp/react-hero": "7.1.1", + "@hashicorp/react-hero": "7.2.1", "@hashicorp/react-image": "4.0.1", "@hashicorp/react-inline-svg": "6.0.1", "@hashicorp/react-logo-grid": "4.0.1", "@hashicorp/react-markdown-page": "1.2.0", - "@hashicorp/react-product-downloads-page": "2.0.2", + "@hashicorp/react-product-downloads-page": "2.0.3", "@hashicorp/react-section-header": "5.0.2", - "@hashicorp/react-subnav": "8.1.0", + "@hashicorp/react-subnav": "8.2.1", "@hashicorp/react-tabs": "6.0.1", "@hashicorp/react-text-split": "3.1.1", "@hashicorp/react-text-splits": "2.1.1", "@hashicorp/react-use-cases": "3.0.2", "@hashicorp/react-vertical-text-block-list": "6.0.2", - "next": "10.1.3", - "next-mdx-remote": "3.0.1", + "next": "10.2.2", + "next-mdx-remote": "3.0.2", "next-remote-watch": "1.0.0", "react": "16.14.0", "react-dom": "16.14.0", "tippy.js": "4.0.0" }, "devDependencies": { - "@types/react": "^17.0.3", + "@types/react": "^17.0.6", "dart-linkcheck": "^2.0.15", "husky": "^4.3.8", - "inquirer": "^8.0.0", - "prettier": "^2.2.1" + "inquirer": "^8.1.0", + "prettier": "^2.3.0" } }, "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.0.tgz", - "integrity": "sha512-H659baxPygLp1ed5Y+kko9nLhhTRtZ6v2k2cs2/WTErAd6XU+OrvTvsEedUprDYUve/t9NLg95Ka9TK8QEQk1w==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.1.tgz", + "integrity": "sha512-bAUU9vKCy45uTTlzJw0LYu1IjoZsmzL6lgjaVFaW1crhX/4P+JD5ReQv3n/wpiXSFaHq1WEO3WyH2g3ymzeipQ==", "dependencies": { - "@algolia/cache-common": "4.9.0" + "@algolia/cache-common": "4.9.1" } }, "node_modules/@algolia/cache-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.0.tgz", - "integrity": "sha512-hBqkLEw1Y7oxEJEVmcdm/s/+KKlvCmSenlX5rrQts5qCNdhdS1QkCvHx8vgFF9J6uliP2TPs+umrrXc+aKsLPw==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.1.tgz", + "integrity": "sha512-tcvw4mOfFy44V4ZxDEy9wNGr6vFROZKRpXKTEBgdw/WBn6mX51H1ar4RWtceDEcDU4H5fIv5tsY3ip2hU+fTPg==" }, "node_modules/@algolia/cache-in-memory": { - "version": "4.9.0", - "resolved": 
"https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.0.tgz", - "integrity": "sha512-8q9z8tkIrgPenZl+aTc6MOQleLnanVy+Nsz7Uzga5r9Kb7xpqYKNI9rSJYyBzl7KRxock5v6AOUiFgi45eDnDg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.1.tgz", + "integrity": "sha512-IEJrHonvdymW2CnRfJtsTVWyfAH05xPEFkGXGCw00+6JNCj8Dln3TeaRLiaaY1srlyGedkemekQm1/Xb46CGOQ==", "dependencies": { - "@algolia/cache-common": "4.9.0" + "@algolia/cache-common": "4.9.1" } }, "node_modules/@algolia/client-account": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.0.tgz", - "integrity": "sha512-u9cljyqUnlgHIKazeOA2R820pDZFReRVm3AObiGrxhdKVQ44ZOgAlN+NIqA+c19iFdpulzpkPKxU+Uavcky7JQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.1.tgz", + "integrity": "sha512-Shpjeuwb7i2LR5QuWREb6UbEQLGB+Pl/J5+wPgILJDP/uWp7jpl0ase9mYNQGKj7TjztpSpQCPZ3dSHPnzZPfw==", "dependencies": { - "@algolia/client-common": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "node_modules/@algolia/client-analytics": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.0.tgz", - "integrity": "sha512-5TafTR/uP9X4EpDOvBK1w4cgc3JpKeokPJqD37q46AH1IGI8UO5Gy1H5LxcGmPTIMdMnuSfiYgRJsyoEO1Co0A==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.1.tgz", + "integrity": "sha512-/g6OkOSIA+A0t/tjvbL6iG/zV4El4LPFgv/tcAYHTH27BmlNtnEXw+iFpGjeUlQoPily9WVB3QNLMJkaNwL3HA==", "dependencies": { - "@algolia/client-common": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "node_modules/@algolia/client-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.0.tgz", - "integrity": "sha512-Rjk4XMXi6B63jdKQwnGbKwIubB5QIgok+k67QwrgadbqVphHueJ3af3D6i3sRcKBBTmdprFAXn0zX/zaxYBhAQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.1.tgz", + "integrity": "sha512-UziRTZ8km3qwoVPIyEre8TV6V+MX7UtbfVqPmSafZ0xu41UUZ+sL56YoKjOXkbKuybeIC9prXMGy/ID5bXkTqg==", "dependencies": { - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "node_modules/@algolia/client-recommendation": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.0.tgz", - "integrity": "sha512-6y6uyQmmowuBqMkk4iLeBOkd1qtBpfGJ5/di0S041eHQlD0v9WxyhbZyOopn0XxopSLbQaO22u0rjEcla7KYlA==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.1.tgz", + "integrity": "sha512-Drtvvm1PNIOpYf4HFlkPFstFQ3IsN+TRmxur2F7y6Faplb5ybISa8ithu1tmlTdyTf3A78hQUQjgJet6qD2XZw==", "dependencies": { - "@algolia/client-common": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, 
"node_modules/@algolia/client-search": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.0.tgz", - "integrity": "sha512-HFfeUJN6GPHsjfcchmksoqlBLF5gT+jRHmSait4fWtde85eGFyJVL7ubUZD9KjlEjzebmUPPIZ1ixcupaTUBnw==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.1.tgz", + "integrity": "sha512-r9Cw2r8kJr45iYncFDht6EshARghU265wuY8Q8oHrpFHjAziEYdsUOdNmQKbsSH5J3gLjDPx1EI5DzVd6ivn3w==", "dependencies": { - "@algolia/client-common": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "node_modules/@algolia/logger-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.0.tgz", - "integrity": "sha512-OU8lzR1I8R0Qsgk+u4GOSFpEEKZkzPYZP1OXsw92gejW08k5N6kVLzfvVvgNA1KAeZPFXADdH26VBQ/2M9wF3g==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.1.tgz", + "integrity": "sha512-9mPrbFlFyPT7or/7PXTiJjyOewWB9QRkZKVXkt5zHAUiUzGxmmdpJIGpPv3YQnDur8lXrXaRI0MHXUuIDMY1ng==" }, "node_modules/@algolia/logger-console": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.0.tgz", - "integrity": "sha512-CrBU+E2iA4xXnb1rwX3G1ox9O+N+OjxnWccL75sWr1nQ/kh08TPpV7TYAvQEOFEDj8vV1kPeYEMENulbjmVZSA==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.1.tgz", + "integrity": "sha512-74VUwjtFjFpjZpi3QoHIPv0kcr3vWUSHX/Vs8PJW3lPsD4CgyhFenQbG9v+ZnyH0JrJwiYTtzfmrVh7IMWZGrQ==", "dependencies": { - "@algolia/logger-common": "4.9.0" + "@algolia/logger-common": "4.9.1" } }, "node_modules/@algolia/requester-browser-xhr": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.0.tgz", - "integrity": "sha512-KJESXTv4z+mDCn1C9b/azUqPTgIFVL/Y4+Eopz6YBg9Lj0C6KQrsW68w0uLJcGSw9o/qBoKcpUo4QNm4/CwrdQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.1.tgz", + "integrity": "sha512-zc46tk5o0ikOAz3uYiRAMxC2iVKAMFKT7nNZnLB5IzT0uqAh7pz/+D/UvIxP4bKmsllpBSnPcpfQF+OI4Ag/BA==", "dependencies": { - "@algolia/requester-common": "4.9.0" + "@algolia/requester-common": "4.9.1" } }, "node_modules/@algolia/requester-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.0.tgz", - "integrity": "sha512-8/ljy4/pnB8d4/yTaJQa2t3oKdbsVq9nDXkwhCACVum8tGYSSGpCtpBGln6M4g+QdfBSQxYILTB1wwHLFUstmg==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.1.tgz", + "integrity": "sha512-9hPgXnlCSbqJqF69M5x5WN3h51Dc+mk/iWNeJSVxExHGvCDfBBZd0v6S15i8q2a9cD1I2RnhMpbnX5BmGtabVA==" }, "node_modules/@algolia/requester-node-http": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.0.tgz", - "integrity": "sha512-JpkjPXDCgT+Z8G8d/6hxId7+560HeCHoiDcEFr9eWR/kClAOgVwgVH1I64pmH8ucsjL7kdWbkxez7zBzPiV+Tg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.1.tgz", + "integrity": "sha512-vYNVbSCuyrCSCjHBQJk+tLZtWCjvvDf5tSbRJjyJYMqpnXuIuP7gZm24iHil4NPYBhbBj5NU2ZDAhc/gTn75Ag==", "dependencies": { - 
"@algolia/requester-common": "4.9.0" + "@algolia/requester-common": "4.9.1" } }, "node_modules/@algolia/transporter": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.0.tgz", - "integrity": "sha512-GySLvXwg0DQ2LM0/W+hr9y1Co3QY1iNnhWA82gFhBrz7RWGzw47qEsh//9u/wnjl6S1WOjH+eKm5PaQATG1BXg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.1.tgz", + "integrity": "sha512-AbjFfGzX+cAuj7Qyc536OxIQzjFOA5FU2ANGStx8LBH+AKXScwfkx67C05riuaRR5adSCLMSEbVvUscH0nF+6A==", "dependencies": { - "@algolia/cache-common": "4.9.0", - "@algolia/logger-common": "4.9.0", - "@algolia/requester-common": "4.9.0" + "@algolia/cache-common": "4.9.1", + "@algolia/logger-common": "4.9.1", + "@algolia/requester-common": "4.9.1" } }, "node_modules/@aws-crypto/ie11-detection": { @@ -1645,9 +1645,9 @@ } }, "node_modules/@hapi/accept": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@hapi/accept/-/accept-5.0.1.tgz", - "integrity": "sha512-fMr4d7zLzsAXo28PRRQPXR1o2Wmu+6z+VY1UzDp0iFo13Twj8WePakwXBiqn3E1aAlTpSNzCXdnnQXFhst8h8Q==", + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@hapi/accept/-/accept-5.0.2.tgz", + "integrity": "sha512-CmzBx/bXUR8451fnZRuZAJRlzgm0Jgu5dltTX/bszmR2lheb9BpyN47Q1RbaGTsvFzn0PXAEs+lXDKfshccYZw==", "dependencies": { "@hapi/boom": "9.x.x", "@hapi/hoek": "9.x.x" @@ -1662,9 +1662,9 @@ } }, "node_modules/@hapi/hoek": { - "version": "9.1.1", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.1.1.tgz", - "integrity": "sha512-CAEbWH7OIur6jEOzaai83jq3FmKmv4PmX1JYfs9IrYcGEVI/lyL1EXJGCj7eFVJ0bg5QR8LMxBlEtA+xKiLpFw==" + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.0.tgz", + "integrity": "sha512-sqKVVVOe5ivCaXDWivIJYVSaEgdQK9ul7a4Kity5Iw7u9+wBAPbX1RMSnLLmp7O4Vzj0WOWwMAJsTL00xwaNug==" }, "node_modules/@hashicorp/js-utils": { "version": "1.0.10", @@ -1682,9 +1682,9 @@ "integrity": "sha512-AvRPKxi6bEzjS6U8dZR9FGjPmlqxxTdzPeFZXYTsc8kUGjoY/X9/GXY2zYCpzEpmUdnLzOSJrDl17INPEeL4Pw==" }, "node_modules/@hashicorp/mktg-global-styles": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@hashicorp/mktg-global-styles/-/mktg-global-styles-3.0.1.tgz", - "integrity": "sha512-k87utsbGPHRBJgaeBMq0HlsoIEHaMn6eVvLRC7mxrxteMCyTg09YVVfihHABozyquK2Z2VnzTeH2dlkcJMNYFQ==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/mktg-global-styles/-/mktg-global-styles-3.0.2.tgz", + "integrity": "sha512-WeeiM//oEkDtnhCXkTNkWXPXGdeEqgDiCxz7p9w4619mkHWCuOOZWQSMMP/GV6i6knBZIzSgCT6QPeZwJNg9nA==" }, "node_modules/@hashicorp/mktg-logos": { "version": "1.0.2", @@ -1692,9 +1692,9 @@ "integrity": "sha512-mZyJ3xG1YTufyDLC2vWFDfj6ppXJ8uK1z5+U/9fgcuJynet5STtEpeVsyZz3oTNcXJiCjAQzvK63T0V8npnKYQ==" }, "node_modules/@hashicorp/nextjs-scripts": { - "version": "18.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/nextjs-scripts/-/nextjs-scripts-18.1.0.tgz", - "integrity": "sha512-NC1u7R1RGfOPB5XZd6MZSX3YkoCkT1BTHiBKwTOZieARPlEeDHdYiJ18fhgIYQLBc0DZ7u10JyKpQHvd15yKdA==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/@hashicorp/nextjs-scripts/-/nextjs-scripts-18.3.1.tgz", + "integrity": "sha512-5Y0xL5bVXQ6BC4XXYpQVu6yUGZMC4d0GxvuQSiR6f2/rYBJP/8GgBCiAJ8Z9JDQINb6413WdaYc0KzE+M9WdyA==", "dependencies": { "@bugsnag/js": "7.5.4", "@bugsnag/plugin-react": "7.5.4", @@ -1754,6 +1754,9 @@ }, "bin": { "next-hashicorp": "bin/next-hashicorp" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">= 3" } }, 
"node_modules/@hashicorp/nextjs-scripts/node_modules/@hashicorp/react-inline-svg": { @@ -1861,12 +1864,17 @@ } }, "node_modules/@hashicorp/react-case-study-slider": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-case-study-slider/-/react-case-study-slider-6.0.0.tgz", - "integrity": "sha512-mW+572e0FB0jr10+N3rrXJJq1w+mc91U2a+RCTEjOytNEfghYk+hOAvqHFciq4lqdl+sYwZQ3dRhHSwflyGJkA==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-case-study-slider/-/react-case-study-slider-6.0.2.tgz", + "integrity": "sha512-x81xrDRV2J6B+e2C7eTEQ936CC1e337EclgF6N8+HLgDQ/6pCsl6+tbhd+f38mpsOc62vEPkhK9HsqGlmMJ1NQ==", "dependencies": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-image": "^2.0.3" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">=3.x", + "@hashicorp/nextjs-scripts": ">=17.x", + "react": ">=16.x" } }, "node_modules/@hashicorp/react-case-study-slider/node_modules/@hashicorp/react-image": { @@ -1904,17 +1912,18 @@ "integrity": "sha512-SM0vGm3BoYI1OOUvRz5NOP8qgLAJII/Idil6N6xJcUacKbnw/sRyXFRLa6QAmPiqFxKrb2BNSFcVtf1RR3kvXQ==" }, "node_modules/@hashicorp/react-docs-page": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-page/-/react-docs-page-13.2.0.tgz", - "integrity": "sha512-FHB+J43ZOstihscSBbXF++WJIMHkWLY3HQebOmIP059vfIuMAyDJ0D3UbZL7BkUi24ZU3oznisGHD39J2bLdug==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-page/-/react-docs-page-13.3.1.tgz", + "integrity": "sha512-zh+3aJ1JMb1sl03/0+CEzctjkx3RdfCPYdzMdqMlnrk7JwWws1q4Df9wkJNHpnKbALqzz8NXwSXC+HHnil9AAg==", "dependencies": { "@hashicorp/react-alert": "^5.0.0", "@hashicorp/react-content": "^7.0.1", - "@hashicorp/react-docs-sidenav": "^8.1.0", + "@hashicorp/react-docs-sidenav": "^8.2.0", "@hashicorp/react-head": "^3.0.2", "@hashicorp/react-placeholder": "^0.1.0", - "@hashicorp/react-search": "^5.0.2", + "@hashicorp/react-search": "^5.1.0", "@hashicorp/versioned-docs": "^0.0.13", + "classnames": "^2.2.6", "fs-exists-sync": "0.1.0", "gray-matter": "4.0.2", "js-yaml": "3.14.0", @@ -1942,9 +1951,9 @@ } }, "node_modules/@hashicorp/react-docs-sidenav": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-sidenav/-/react-docs-sidenav-8.1.0.tgz", - "integrity": "sha512-TN6D/FKbDGCpLrBv6wO9U4Ad0GtL+4HEJpPMEchTannElkMhq67nV2x6cl4+HzspFsD07dlaYIDUpyASUV+wew==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-sidenav/-/react-docs-sidenav-8.2.0.tgz", + "integrity": "sha512-VoXJFuMF0bZkiEpkpTgzo5fz8nyVjTFaiHYFe1RgaM6EWKgVtT8Vjtr5oUJ9gDOf2fMLc+AgiQ0PbTeMWX662g==", "dependencies": { "@hashicorp/react-link-wrap": "^3.0.1", "fuzzysearch": "1.0.3" @@ -1964,12 +1973,17 @@ } }, "node_modules/@hashicorp/react-featured-slider": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-featured-slider/-/react-featured-slider-4.0.0.tgz", - "integrity": "sha512-WcblcrlZ8KoLqyku8bBljY/9SzaBYyB/tae937iaMFZgUTvl865WGDoMADxZN91qBiQzfiljJXi5nyTKzLXMkQ==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-featured-slider/-/react-featured-slider-4.0.2.tgz", + "integrity": "sha512-jTFUAhH22klI3rU0ZMrMvkalsTsK2lTN7Ct1A/BQRMnR+Hw7r0jD4hjbMFWJm/QjF8quPcNxI/uFS/GrTO9p2g==", "dependencies": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-image": "^2.0.3" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">=3.x", + "@hashicorp/nextjs-scripts": ">=17.x", + "react": ">=16.x" 
} }, "node_modules/@hashicorp/react-featured-slider/node_modules/@hashicorp/react-image": { @@ -2009,11 +2023,11 @@ "integrity": "sha512-kKY/5XwWkBsDSvF8jHgNnxG4hx8ryPjoEtPFxPMVCly/ouwbslilIrWzqSYbeP7vUO686JzBLf5xkwq+HV0aig==" }, "node_modules/@hashicorp/react-hero": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@hashicorp/react-hero/-/react-hero-7.1.1.tgz", - "integrity": "sha512-MYMduvDLsGSaw8gFpqcgxhdkC3ZXMs5mm4YlRjWw+MWv6lXJ9wzHss387s2qiqnxwQuQ5O2Ec2zraTH4pqf0gg==", + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-hero/-/react-hero-7.2.1.tgz", + "integrity": "sha512-+G4JTrvQm57Qp5fBQgA1X2RFKhFNDatJH2biTBgz27TLB00/g12XduTFA8GqMbVIluCBhrUdnxtpxgk8Fud+Pw==", "dependencies": { - "@hashicorp/js-utils": "^1.0.8-alpha.0", + "@hashicorp/js-utils": "next", "@hashicorp/localstorage-polyfill": "^1.0.14", "@hashicorp/react-alert": "^2.0.3", "@hashicorp/react-button": "^5.0.1", @@ -2023,6 +2037,11 @@ "formik": "^1.5.8", "promise-polyfill": "^8.1.0", "query-string": "^5.1.1" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">=3.x", + "@hashicorp/nextjs-scripts": ">=17.x", + "react": ">=16.x" } }, "node_modules/@hashicorp/react-hero/node_modules/@hashicorp/react-image": { @@ -2120,9 +2139,9 @@ } }, "node_modules/@hashicorp/react-product-downloads-page": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-product-downloads-page/-/react-product-downloads-page-2.0.2.tgz", - "integrity": "sha512-Ff7kki+A6FqP7/WHa5AA7vRtdBJZ5CHYy33RQC7Hfl5GCYN/bicR0RSiRDaSSGLwtoIGXnNIm+qpj3txixrvtw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@hashicorp/react-product-downloads-page/-/react-product-downloads-page-2.0.3.tgz", + "integrity": "sha512-9H56cG5EzteBFvZH6IBzB6pMwK8dFdsin4ajDOROQE4d20kCfG1tCB5OGsZHNwn7HSxJ1fVnbyjc1mDkj5Piqg==", "dependencies": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-head": "^3.0.2", @@ -2135,9 +2154,9 @@ } }, "node_modules/@hashicorp/react-search": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-search/-/react-search-5.0.2.tgz", - "integrity": "sha512-YcvzgkWOnF5vXFaA86OVKDptP0s0UflRJl18rYcyAhkCzMO8bp2Gt5bvLq7LNq4NGRi+3G0dIv+X04UdQVXxaQ==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hashicorp/react-search/-/react-search-5.1.0.tgz", + "integrity": "sha512-Lrs3eDTwro8r+Yp4ow2orWaHKy00Z0THWg9As3KpDtijJzEAPRgWEP9nDxUr+bcOE4InfrJnC3GQ1BeZKIeU+A==", "dependencies": { "@hashicorp/react-inline-svg": "^1.0.2", "@hashicorp/remark-plugins": "^3.0.0", @@ -2149,12 +2168,20 @@ "remark": "^12.0.1", "search-insights": "^1.6.0", "unist-util-visit": "^2.0.3" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">=3.x", + "@hashicorp/nextjs-scripts": ">=17.x", + "react": ">=16.x" } }, "node_modules/@hashicorp/react-search/node_modules/@hashicorp/react-inline-svg": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@hashicorp/react-inline-svg/-/react-inline-svg-1.0.2.tgz", - "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==" + "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==", + "peerDependencies": { + "react": "^16.9.0" + } }, "node_modules/@hashicorp/react-section-header": { "version": "5.0.2", @@ -2177,9 +2204,9 @@ } }, "node_modules/@hashicorp/react-subnav": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-subnav/-/react-subnav-8.1.0.tgz", - 
"integrity": "sha512-0Kpkkb2nmqnCUgtq+h6ju2iOn/VTVemIpAQMpjRtxUFFGRUV1FAnks7enlPE6NIXVmREg0ZD/6CdnesG0IL6dA==", + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-subnav/-/react-subnav-8.2.1.tgz", + "integrity": "sha512-rnDPYK5l46IcoEut7dD7VDdpW8vz4JbINC/4FxvEAPd8i1Rv+vtfWsJjG5fmyXRgTbLQjVvkyqrRJ3sgh50mmA==", "dependencies": { "@hashicorp/mktg-logos": "^1.0.1", "@hashicorp/react-button": "^5.0.1", @@ -2187,6 +2214,10 @@ "@hashicorp/react-link-wrap": "^1.0.2", "classnames": "^2.2.6", "isomorphic-unfetch": "^3.0.0" + }, + "peerDependencies": { + "@hashicorp/mktg-global-styles": ">=3.x", + "@hashicorp/nextjs-scripts": ">=17.x" } }, "node_modules/@hashicorp/react-subnav/node_modules/@hashicorp/react-inline-svg": { @@ -2549,19 +2580,19 @@ } }, "node_modules/@next/env": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/env/-/env-10.1.3.tgz", - "integrity": "sha512-q7z7NvmRs66lCQmVJtKjDxVtMTjSwP6ExVzaH46pbTH60MHgzEJ9H4jXrFLTihPmCIvpAv6Ai04jbS8dcg1ZMQ==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/env/-/env-10.2.2.tgz", + "integrity": "sha512-m0xOpl4F9z7R7Yt2OtJoo6ZUsFPdx+kuvZeoctH7T6lO66DmZL3W6MQDxso/ArkH8VOlDPZgeQVVBPf+I7wflA==" }, "node_modules/@next/polyfill-module": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/polyfill-module/-/polyfill-module-10.1.3.tgz", - "integrity": "sha512-1DtUVcuoBJAn5IrxIZQjUG1KTPkiXMYloykPSkRxawimgvG9dRj2kscU+4KGNSFxHoxW9c68VRCb+7MDz5aGGw==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/polyfill-module/-/polyfill-module-10.2.2.tgz", + "integrity": "sha512-0t5Hw1Dr18TWP65qAnakRa8+jza6SAFOz0b2v67s5AVquAwXXlclR4SfUy3ahrRtjCqlbLEE/oFIzCGbyMYfVA==" }, "node_modules/@next/react-dev-overlay": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/react-dev-overlay/-/react-dev-overlay-10.1.3.tgz", - "integrity": "sha512-vIgUah3bR9+MKzwU1Ni5ONfYM0VdI42i7jZ+Ei1c0wjwkG9anVnDqhSQ3mVg62GP2nt7ExaaFyf9THbsw5KYXg==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/react-dev-overlay/-/react-dev-overlay-10.2.2.tgz", + "integrity": "sha512-uPslFPWvvZ8AdadGdK2/834UnJy6F+7071/ere6QpN88Ngzqx9lDIhjslEeFLRtpyBst4s1YUdbm69btVPdE5w==", "dependencies": { "@babel/code-frame": "7.12.11", "anser": "1.4.9", @@ -2574,6 +2605,10 @@ "source-map": "0.8.0-beta.0", "stacktrace-parser": "0.1.10", "strip-ansi": "6.0.0" + }, + "peerDependencies": { + "react": "^16.9.0 || ^17", + "react-dom": "^16.9.0 || ^17" } }, "node_modules/@next/react-dev-overlay/node_modules/@babel/code-frame": { @@ -2594,6 +2629,9 @@ }, "engines": { "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" } }, "node_modules/@next/react-dev-overlay/node_modules/classnames": { @@ -2613,9 +2651,18 @@ } }, "node_modules/@next/react-refresh-utils": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/react-refresh-utils/-/react-refresh-utils-10.1.3.tgz", - "integrity": "sha512-P4GJZuLKfD/o42JvGZ/xP4Hxg68vd3NeZxOLqIuQKFjjaYgC2IrO+lE5PTwGmRkytjfprJC+9j7Jss/xQAS6QA==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/react-refresh-utils/-/react-refresh-utils-10.2.2.tgz", + "integrity": "sha512-OL7r0iz+SiE9SMWcxZocUtEAHv0/TlBWxIE3KjjO1vWSU1r0gMrE2l2RxHfMLIPsl6CjAkcPxoaXlosFsJ2S5w==", + "peerDependencies": { + "react-refresh": "0.8.3", + "webpack": "^4 || ^5" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + } + } }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.4", @@ -2799,9 
+2846,9 @@ "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" }, "node_modules/@types/react": { - "version": "17.0.3", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.3.tgz", - "integrity": "sha512-wYOUxIgs2HZZ0ACNiIayItyluADNbONl7kt8lkLjVK8IitMH5QMyAh75Fwhmo37r1m7L2JaFj03sIfxBVDvRAg==", + "version": "17.0.6", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.6.tgz", + "integrity": "sha512-u/TtPoF/hrvb63LdukET6ncaplYsvCvmkceasx8oG84/ZCsoLxz9Z/raPBP4lTAiWW1Jb889Y9svHmv8R26dWw==", "dev": true, "dependencies": { "@types/prop-types": "*", @@ -2987,24 +3034,24 @@ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" }, "node_modules/algoliasearch": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.0.tgz", - "integrity": "sha512-hhlza8j/uCWGe2kSz89HlcexiLxO1wzOKLNPWivNtZeZO5J85agbcMsrKV5+xLFI4LbulP/b/4/IvswxzPrGIw==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.9.0", - "@algolia/cache-common": "4.9.0", - "@algolia/cache-in-memory": "4.9.0", - "@algolia/client-account": "4.9.0", - "@algolia/client-analytics": "4.9.0", - "@algolia/client-common": "4.9.0", - "@algolia/client-recommendation": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/logger-common": "4.9.0", - "@algolia/logger-console": "4.9.0", - "@algolia/requester-browser-xhr": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/requester-node-http": "4.9.0", - "@algolia/transporter": "4.9.0" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.1.tgz", + "integrity": "sha512-EeJUYXzBEhZSsL6tXc3hseLBCtlNLa1MZ4mlMK6EeX38yRjY5vgnFcNNml6uUhlOjvheKxgkKRpPWkxgL8Cqkg==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.9.1", + "@algolia/cache-common": "4.9.1", + "@algolia/cache-in-memory": "4.9.1", + "@algolia/client-account": "4.9.1", + "@algolia/client-analytics": "4.9.1", + "@algolia/client-common": "4.9.1", + "@algolia/client-recommendation": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/logger-common": "4.9.1", + "@algolia/logger-console": "4.9.1", + "@algolia/requester-browser-xhr": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/requester-node-http": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "node_modules/algoliasearch-helper": { @@ -3013,6 +3060,9 @@ "integrity": "sha512-OjyVLjykaYKCMxxRMZNiwLp8CS310E0qAeIY2NaublcmLAh8/SL19+zYHp7XCLtMem2ZXwl3ywMiA32O9jszuw==", "dependencies": { "events": "^1.1.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 5" } }, "node_modules/algoliasearch-helper/node_modules/events": { @@ -4354,21 +4404,25 @@ } }, "node_modules/browserslist": { - "version": "4.16.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.1.tgz", - "integrity": "sha512-UXhDrwqsNcpTYJBTZsbGATDxZbiVDsx6UjpmRUmtnP10pr8wAYr5LgFoEFw9ixriQH2mv/NX2SfGzE/o8GndLA==", + "version": "4.16.6", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", + "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", "dependencies": { - "caniuse-lite": "^1.0.30001173", - "colorette": "^1.2.1", - "electron-to-chromium": "^1.3.634", + "caniuse-lite": "^1.0.30001219", + "colorette": "^1.2.2", + "electron-to-chromium": "^1.3.723", "escalade": "^3.1.1", - "node-releases": "^1.1.69" + "node-releases": "^1.1.71" }, "bin": { 
"browserslist": "cli.js" }, "engines": { "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" } }, "node_modules/buffer": { @@ -4541,9 +4595,13 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": "sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" + "version": "1.0.30001228", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", + "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + } }, "node_modules/caw": { "version": "2.0.1", @@ -4768,6 +4826,18 @@ "node": ">=8" } }, + "node_modules/cli-spinners": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.6.0.tgz", + "integrity": "sha512-t+4/y50K/+4xcCRosKkA7W4gTr1MySvLV0q+PxmG7FJ5g+66ChKurYjxBCjHggHH3HA5Hh9cy+lcUGWDqVH+4Q==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/cli-truncate": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", @@ -4799,6 +4869,15 @@ "tiny-emitter": "^2.0.0" } }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, "node_modules/clone-regexp": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clone-regexp/-/clone-regexp-2.2.0.tgz", @@ -5584,197 +5663,25 @@ } }, "node_modules/cssnano-preset-simple": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-simple/-/cssnano-preset-simple-1.2.2.tgz", - "integrity": "sha512-gtvrcRSGtP3hA/wS8mFVinFnQdEsEpm3v4I/s/KmNjpdWaThV/4E5EojAzFXxyT5OCSRPLlHR9iQexAqKHlhGQ==", - "dependencies": { - "caniuse-lite": "^1.0.30001179", - "postcss": "^7.0.32" - } - }, - "node_modules/cssnano-preset-simple/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-preset-simple/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-preset-simple/node_modules/chalk/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-preset-simple/node_modules/color-convert": { - "version": "1.9.3", - "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/cssnano-preset-simple/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/cssnano-preset-simple/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-preset-simple/node_modules/postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "dependencies": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/cssnano-preset-simple/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssnano-preset-simple/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cssnano-preset-simple/-/cssnano-preset-simple-2.0.0.tgz", + "integrity": "sha512-HkufSLkaBJbKBFx/7aj5HmCK9Ni/JedRQm0mT2qBzMG/dEuJOLnMt2lK6K1rwOOyV4j9aSY+knbW9WoS7BYpzg==", "dependencies": { - "has-flag": "^3.0.0" + "caniuse-lite": "^1.0.30001202" }, - "engines": { - "node": ">=6" + "peerDependencies": { + "postcss": "^8.2.1" } }, "node_modules/cssnano-simple": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cssnano-simple/-/cssnano-simple-1.2.2.tgz", - "integrity": "sha512-4slyYc1w4JhSbhVX5xi9G0aQ42JnRyPg+7l7cqoNyoIDzfWx40Rq3JQZnoAWDu60A4AvKVp9ln/YSUOdhDX68g==", - "dependencies": { - "cssnano-preset-simple": "1.2.2", - "postcss": "^7.0.32" - } - }, - "node_modules/cssnano-simple/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-simple/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-simple/node_modules/chalk/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": 
"sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-simple/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/cssnano-simple/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/cssnano-simple/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano-simple/node_modules/postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "dependencies": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/cssnano-simple/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssnano-simple/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cssnano-simple/-/cssnano-simple-2.0.0.tgz", + "integrity": "sha512-0G3TXaFxlh/szPEG/o3VcmCwl0N3E60XNb9YZZijew5eIs6fLjJuOPxQd9yEBaX2p/YfJtt49i4vYi38iH6/6w==", "dependencies": { - "has-flag": "^3.0.0" + "cssnano-preset-simple": "^2.0.0" }, - "engines": { - "node": ">=6" + "peerDependencies": { + "postcss": "^8.2.2" } }, "node_modules/csso": { @@ -6063,6 +5970,15 @@ "node": ">=0.10.0" } }, + "node_modules/defaults": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", + "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", + "dev": true, + "dependencies": { + "clone": "^1.0.2" + } + }, "node_modules/define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", @@ -6273,9 +6189,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.3.712", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.712.tgz", - "integrity": "sha512-3kRVibBeCM4vsgoHHGKHmPocLqtFAGTrebXxxtgKs87hNUzXrX2NuS3jnBys7IozCnw7viQlozxKkmty2KNfrw==" + "version": "1.3.735", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.735.tgz", + "integrity": "sha512-cp7MWzC3NseUJV2FJFgaiesdrS+A8ZUjX5fLAxdRlcaPDkaPGFplX930S5vf84yqDp4LjuLdKouWuVOTwUfqHQ==" }, "node_modules/elliptic": { "version": "6.5.4", @@ -7651,7 +7567,9 @@ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", "integrity": 
"sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "optional": true, - "os": ["darwin"], + "os": [ + "darwin" + ], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } @@ -8819,19 +8737,20 @@ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" }, "node_modules/inquirer": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.0.0.tgz", - "integrity": "sha512-ON8pEJPPCdyjxj+cxsYRe6XfCJepTxANdNnTebsTuQgXpRyZRRT9t4dJwjRubgmvn20CLSEnozRUayXyM9VTXA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.1.0.tgz", + "integrity": "sha512-1nKYPoalt1vMBfCMtpomsUc32wmOoWXAoq3kM/5iTfxyQ2f/BxjixQpC+mbZ7BI0JUXHED4/XPXekDVtJNpXYw==", "dev": true, "dependencies": { "ansi-escapes": "^4.2.1", - "chalk": "^4.1.0", + "chalk": "^4.1.1", "cli-cursor": "^3.1.0", "cli-width": "^3.0.0", "external-editor": "^3.0.3", "figures": "^3.0.0", "lodash": "^4.17.21", "mute-stream": "0.0.8", + "ora": "^5.3.0", "run-async": "^2.4.0", "rxjs": "^6.6.6", "string-width": "^4.1.0", @@ -8842,6 +8761,22 @@ "node": ">=8.0.0" } }, + "node_modules/inquirer/node_modules/chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, "node_modules/internal-slot": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", @@ -9095,6 +9030,15 @@ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==" }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/is-jpg": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-jpg/-/is-jpg-2.0.0.tgz", @@ -9346,17 +9290,6 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, - "node_modules/isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dependencies": { - "isarray": "1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/isomorphic-fetch": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz", @@ -9671,15 +9604,6 @@ "node": ">= 0.8.0" } }, - "node_modules/line-column": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/line-column/-/line-column-1.0.2.tgz", - "integrity": "sha1-0lryk2tvSEkXKzEuR5LR2Ye8NKI=", - "dependencies": { - "isarray": "^1.0.0", - "isobject": "^2.0.0" - } - }, "node_modules/line-reader": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/line-reader/-/line-reader-0.4.0.tgz", @@ -10459,9 +10383,9 @@ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" }, 
"node_modules/nanoid": { - "version": "3.1.22", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.22.tgz", - "integrity": "sha512-/2ZUaJX2ANuLtTvqTlgqBQNJoQO398KyJgZloL0PZkC0dpysjncRUPsFe3DUPzz/y3h+u7C46np8RMuvF3jsSQ==", + "version": "3.1.23", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz", + "integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==", "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -10535,28 +10459,28 @@ } }, "node_modules/next": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/next/-/next-10.1.3.tgz", - "integrity": "sha512-8Jf38F+s0YcXXkJGF5iUxOqSmbHrey0fX5Epc43L0uwDKmN2jK9vhc2ihCwXC1pmu8d2m/8wfTiXRJKGti55yw==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/next/-/next-10.2.2.tgz", + "integrity": "sha512-HPGSLrflWPvf3zEZSIk/uj0CZ+YYrpZwZS0PFAgXbEwb894iRuAPzglagqlzcCh7lg12RBEaKNIxhrVa5xgjtQ==", "dependencies": { "@babel/runtime": "7.12.5", - "@hapi/accept": "5.0.1", - "@next/env": "10.1.3", - "@next/polyfill-module": "10.1.3", - "@next/react-dev-overlay": "10.1.3", - "@next/react-refresh-utils": "10.1.3", + "@hapi/accept": "5.0.2", + "@next/env": "10.2.2", + "@next/polyfill-module": "10.2.2", + "@next/react-dev-overlay": "10.2.2", + "@next/react-refresh-utils": "10.2.2", "@opentelemetry/api": "0.14.0", "assert": "2.0.0", "ast-types": "0.13.2", "browserify-zlib": "0.2.0", - "browserslist": "4.16.1", + "browserslist": "4.16.6", "buffer": "5.6.0", - "caniuse-lite": "^1.0.30001179", + "caniuse-lite": "^1.0.30001228", "chalk": "2.4.2", "chokidar": "3.5.1", "constants-browserify": "1.0.0", "crypto-browserify": "3.12.0", - "cssnano-simple": "1.2.2", + "cssnano-simple": "2.0.0", "domain-browser": "4.19.0", "encoding": "0.1.13", "etag": "1.8.1", @@ -10572,7 +10496,7 @@ "p-limit": "3.1.0", "path-browserify": "1.0.1", "pnp-webpack-plugin": "1.6.4", - "postcss": "8.1.7", + "postcss": "8.2.13", "process": "0.11.10", "prop-types": "15.7.2", "querystring-es3": "0.2.1", @@ -10595,12 +10519,30 @@ }, "engines": { "node": ">=10.13.0" + }, + "peerDependencies": { + "fibers": ">= 3.1.0", + "node-sass": "^4.0.0 || ^5.0.0", + "react": "^16.6.0 || ^17", + "react-dom": "^16.6.0 || ^17", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "fibers": { + "optional": true + }, + "node-sass": { + "optional": true + }, + "sass": { + "optional": true + } } }, "node_modules/next-mdx-remote": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/next-mdx-remote/-/next-mdx-remote-3.0.1.tgz", - "integrity": "sha512-sV1sM6CkdYP5aPND1+vrF3wr8TU8NJwVlcFe2rPjVHR5J/9M2bl9zlhF6AF+GOKHA7d5kUdwHoLbApEGofD8hA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/next-mdx-remote/-/next-mdx-remote-3.0.2.tgz", + "integrity": "sha512-imLrrw6c/Hi1BKRyJE9yLhk45N5zzw+/CxSQRHOAByYUSOhd+DNjUaqm2jtd+/GgxJeYB1d3fJPXmvUZTez1MQ==", "dependencies": { "@mdx-js/mdx": "^1.6.22", "@mdx-js/react": "^1.6.22", @@ -11385,6 +11327,40 @@ "node": ">=10" } }, + "node_modules/ora": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.0.tgz", + "integrity": "sha512-1StwyXQGoU6gdjYkyVcqOLnVlbKj+6yPNNOxJVgpt9t4eksKjiriiHuxktLYkgllwk+D6MbC4ihH84L1udRXPg==", + "dev": true, + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { 
+ "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, "node_modules/os-browserify": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", @@ -11828,17 +11804,20 @@ } }, "node_modules/postcss": { - "version": "8.1.7", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.1.7.tgz", - "integrity": "sha512-llCQW1Pz4MOPwbZLmOddGM9eIJ8Bh7SZ2Oj5sxZva77uVaotYDsYTch1WBTNu7fUY0fpWp0fdt7uW40D4sRiiQ==", + "version": "8.2.13", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.2.13.tgz", + "integrity": "sha512-FCE5xLH+hjbzRdpbRb1IMCvPv9yZx2QnDarBEYSN0N0HYk+TcXsEhwdFcFb+SRWOKzKGErhIEbBK2ogyLdTtfQ==", "dependencies": { - "colorette": "^1.2.1", - "line-column": "^1.0.2", - "nanoid": "^3.1.16", + "colorette": "^1.2.2", + "nanoid": "^3.1.22", "source-map": "^0.6.1" }, "engines": { "node": "^10 || ^12 || >=14" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" } }, "node_modules/postcss-attribute-case-insensitive": { @@ -15801,9 +15780,9 @@ } }, "node_modules/prettier": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.2.1.tgz", - "integrity": "sha512-PqyhM2yCjg/oKkFPtTGUojv7gnZAoG80ttl45O6x2Ug/rMJw4wcc9k6aaf2hibP7BGVCCM33gZoGjyvt9mm16Q==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.3.0.tgz", + "integrity": "sha512-kXtO4s0Lz/DW/IJ9QdWhAf7/NmPWQXkFr/r/WkR3vyI+0v8amTDxiaQSLzs8NBlytfLWX/7uQUMIW677yLKl4w==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -16092,14 +16071,18 @@ "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==" }, "node_modules/react-instantsearch-core": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-6.10.3.tgz", - "integrity": "sha512-7twp3OJrPGTFpyXwjJNeOTbQw7RTv+0cUyKkXR9njEyLdXKcPWfpeBirXfdQHjYIHEY2b0V2Vom1B9IHSDSUtQ==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-6.11.0.tgz", + "integrity": "sha512-RE5pPuSL5e3+wgMl+QLY+MgyqgNXBrfppqk3podOXTdWpQ5VdaXVrpeWVcmFJlRRKPLJI6RGqCiHVRTaGt1HVw==", "dependencies": { "@babel/runtime": "^7.1.2", "algoliasearch-helper": "^3.4.3", "prop-types": "^15.6.2", "react-fast-compare": "^3.0.0" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 5", + "react": ">= 16.3.0 < 18" } }, "node_modules/react-instantsearch-core/node_modules/react-fast-compare": { @@ -16108,16 +16091,20 @@ "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" }, "node_modules/react-instantsearch-dom": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/react-instantsearch-dom/-/react-instantsearch-dom-6.10.3.tgz", - "integrity": "sha512-kxc6IEruxJrc7O9lsLV5o4YK/RkGt3l7D1Y51JfmYkgeLuQHApwgcy/TAIoSN7wfR/1DONFbX8Y5VhU9Wqh87Q==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-dom/-/react-instantsearch-dom-6.11.0.tgz", + "integrity": 
"sha512-OXbP3OuT52HAoCbQiPxPAL9q/Lm5pm+eRx8TMKB2kcf6fQzc+L7RMy7fveuMENcmBks+vdD2+G7DGM8oVv9etg==", "dependencies": { "@babel/runtime": "^7.1.2", "algoliasearch-helper": "^3.4.3", "classnames": "^2.2.5", "prop-types": "^15.6.2", "react-fast-compare": "^3.0.0", - "react-instantsearch-core": "^6.10.3" + "react-instantsearch-core": "^6.11.0" + }, + "peerDependencies": { + "react": ">= 16.3.0 < 18", + "react-dom": ">= 16.3.0 < 18" } }, "node_modules/react-instantsearch-dom/node_modules/react-fast-compare": { @@ -16814,9 +16801,9 @@ } }, "node_modules/search-insights": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-1.7.1.tgz", - "integrity": "sha512-CSuSKIJp+WcSwYrD9GgIt1e3xmI85uyAefC4/KYGgtvNEm6rt4kBGilhVRmTJXxRE2W1JknvP598Q7SMhm7qKA==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-1.8.0.tgz", + "integrity": "sha512-4sd6oS/sLH/UxiZ4vMoDbcpJP01pcoNI4mm3ZsUfDAMCPKxDda1R8SFZUv618og3NYBvvWvwmf8VRC0rNYuTkg==", "engines": { "node": ">=8.16.0" } @@ -17537,14 +17524,6 @@ "node": ">=6" } }, - "node_modules/stacktrace-parser/node_modules/type-fest": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.7.1.tgz", - "integrity": "sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==", - "engines": { - "node": ">=8" - } - }, "node_modules/state-toggle": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", @@ -19341,6 +19320,14 @@ "node": ">= 0.8.0" } }, + "node_modules/type-fest": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.7.1.tgz", + "integrity": "sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==", + "engines": { + "node": ">=8" + } + }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -19848,6 +19835,15 @@ "node": ">=10.13.0" } }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", + "dev": true, + "dependencies": { + "defaults": "^1.0.3" + } + }, "node_modules/web-namespaces": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", @@ -20116,118 +20112,118 @@ }, "dependencies": { "@algolia/cache-browser-local-storage": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.0.tgz", - "integrity": "sha512-H659baxPygLp1ed5Y+kko9nLhhTRtZ6v2k2cs2/WTErAd6XU+OrvTvsEedUprDYUve/t9NLg95Ka9TK8QEQk1w==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.1.tgz", + "integrity": "sha512-bAUU9vKCy45uTTlzJw0LYu1IjoZsmzL6lgjaVFaW1crhX/4P+JD5ReQv3n/wpiXSFaHq1WEO3WyH2g3ymzeipQ==", "requires": { - "@algolia/cache-common": "4.9.0" + "@algolia/cache-common": "4.9.1" } }, "@algolia/cache-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.0.tgz", - "integrity": "sha512-hBqkLEw1Y7oxEJEVmcdm/s/+KKlvCmSenlX5rrQts5qCNdhdS1QkCvHx8vgFF9J6uliP2TPs+umrrXc+aKsLPw==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.1.tgz", + "integrity": 
"sha512-tcvw4mOfFy44V4ZxDEy9wNGr6vFROZKRpXKTEBgdw/WBn6mX51H1ar4RWtceDEcDU4H5fIv5tsY3ip2hU+fTPg==" }, "@algolia/cache-in-memory": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.0.tgz", - "integrity": "sha512-8q9z8tkIrgPenZl+aTc6MOQleLnanVy+Nsz7Uzga5r9Kb7xpqYKNI9rSJYyBzl7KRxock5v6AOUiFgi45eDnDg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.1.tgz", + "integrity": "sha512-IEJrHonvdymW2CnRfJtsTVWyfAH05xPEFkGXGCw00+6JNCj8Dln3TeaRLiaaY1srlyGedkemekQm1/Xb46CGOQ==", "requires": { - "@algolia/cache-common": "4.9.0" + "@algolia/cache-common": "4.9.1" } }, "@algolia/client-account": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.0.tgz", - "integrity": "sha512-u9cljyqUnlgHIKazeOA2R820pDZFReRVm3AObiGrxhdKVQ44ZOgAlN+NIqA+c19iFdpulzpkPKxU+Uavcky7JQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.1.tgz", + "integrity": "sha512-Shpjeuwb7i2LR5QuWREb6UbEQLGB+Pl/J5+wPgILJDP/uWp7jpl0ase9mYNQGKj7TjztpSpQCPZ3dSHPnzZPfw==", "requires": { - "@algolia/client-common": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "@algolia/client-analytics": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.0.tgz", - "integrity": "sha512-5TafTR/uP9X4EpDOvBK1w4cgc3JpKeokPJqD37q46AH1IGI8UO5Gy1H5LxcGmPTIMdMnuSfiYgRJsyoEO1Co0A==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.1.tgz", + "integrity": "sha512-/g6OkOSIA+A0t/tjvbL6iG/zV4El4LPFgv/tcAYHTH27BmlNtnEXw+iFpGjeUlQoPily9WVB3QNLMJkaNwL3HA==", "requires": { - "@algolia/client-common": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "@algolia/client-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.0.tgz", - "integrity": "sha512-Rjk4XMXi6B63jdKQwnGbKwIubB5QIgok+k67QwrgadbqVphHueJ3af3D6i3sRcKBBTmdprFAXn0zX/zaxYBhAQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.1.tgz", + "integrity": "sha512-UziRTZ8km3qwoVPIyEre8TV6V+MX7UtbfVqPmSafZ0xu41UUZ+sL56YoKjOXkbKuybeIC9prXMGy/ID5bXkTqg==", "requires": { - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "@algolia/client-recommendation": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.0.tgz", - "integrity": "sha512-6y6uyQmmowuBqMkk4iLeBOkd1qtBpfGJ5/di0S041eHQlD0v9WxyhbZyOopn0XxopSLbQaO22u0rjEcla7KYlA==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.1.tgz", + "integrity": "sha512-Drtvvm1PNIOpYf4HFlkPFstFQ3IsN+TRmxur2F7y6Faplb5ybISa8ithu1tmlTdyTf3A78hQUQjgJet6qD2XZw==", "requires": { - "@algolia/client-common": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + 
"@algolia/client-common": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "@algolia/client-search": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.0.tgz", - "integrity": "sha512-HFfeUJN6GPHsjfcchmksoqlBLF5gT+jRHmSait4fWtde85eGFyJVL7ubUZD9KjlEjzebmUPPIZ1ixcupaTUBnw==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.1.tgz", + "integrity": "sha512-r9Cw2r8kJr45iYncFDht6EshARghU265wuY8Q8oHrpFHjAziEYdsUOdNmQKbsSH5J3gLjDPx1EI5DzVd6ivn3w==", "requires": { - "@algolia/client-common": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/transporter": "4.9.0" + "@algolia/client-common": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "@algolia/logger-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.0.tgz", - "integrity": "sha512-OU8lzR1I8R0Qsgk+u4GOSFpEEKZkzPYZP1OXsw92gejW08k5N6kVLzfvVvgNA1KAeZPFXADdH26VBQ/2M9wF3g==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.1.tgz", + "integrity": "sha512-9mPrbFlFyPT7or/7PXTiJjyOewWB9QRkZKVXkt5zHAUiUzGxmmdpJIGpPv3YQnDur8lXrXaRI0MHXUuIDMY1ng==" }, "@algolia/logger-console": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.0.tgz", - "integrity": "sha512-CrBU+E2iA4xXnb1rwX3G1ox9O+N+OjxnWccL75sWr1nQ/kh08TPpV7TYAvQEOFEDj8vV1kPeYEMENulbjmVZSA==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.1.tgz", + "integrity": "sha512-74VUwjtFjFpjZpi3QoHIPv0kcr3vWUSHX/Vs8PJW3lPsD4CgyhFenQbG9v+ZnyH0JrJwiYTtzfmrVh7IMWZGrQ==", "requires": { - "@algolia/logger-common": "4.9.0" + "@algolia/logger-common": "4.9.1" } }, "@algolia/requester-browser-xhr": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.0.tgz", - "integrity": "sha512-KJESXTv4z+mDCn1C9b/azUqPTgIFVL/Y4+Eopz6YBg9Lj0C6KQrsW68w0uLJcGSw9o/qBoKcpUo4QNm4/CwrdQ==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.1.tgz", + "integrity": "sha512-zc46tk5o0ikOAz3uYiRAMxC2iVKAMFKT7nNZnLB5IzT0uqAh7pz/+D/UvIxP4bKmsllpBSnPcpfQF+OI4Ag/BA==", "requires": { - "@algolia/requester-common": "4.9.0" + "@algolia/requester-common": "4.9.1" } }, "@algolia/requester-common": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.0.tgz", - "integrity": "sha512-8/ljy4/pnB8d4/yTaJQa2t3oKdbsVq9nDXkwhCACVum8tGYSSGpCtpBGln6M4g+QdfBSQxYILTB1wwHLFUstmg==" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.1.tgz", + "integrity": "sha512-9hPgXnlCSbqJqF69M5x5WN3h51Dc+mk/iWNeJSVxExHGvCDfBBZd0v6S15i8q2a9cD1I2RnhMpbnX5BmGtabVA==" }, "@algolia/requester-node-http": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.0.tgz", - "integrity": "sha512-JpkjPXDCgT+Z8G8d/6hxId7+560HeCHoiDcEFr9eWR/kClAOgVwgVH1I64pmH8ucsjL7kdWbkxez7zBzPiV+Tg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.1.tgz", + "integrity": "sha512-vYNVbSCuyrCSCjHBQJk+tLZtWCjvvDf5tSbRJjyJYMqpnXuIuP7gZm24iHil4NPYBhbBj5NU2ZDAhc/gTn75Ag==", 
"requires": { - "@algolia/requester-common": "4.9.0" + "@algolia/requester-common": "4.9.1" } }, "@algolia/transporter": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.0.tgz", - "integrity": "sha512-GySLvXwg0DQ2LM0/W+hr9y1Co3QY1iNnhWA82gFhBrz7RWGzw47qEsh//9u/wnjl6S1WOjH+eKm5PaQATG1BXg==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.1.tgz", + "integrity": "sha512-AbjFfGzX+cAuj7Qyc536OxIQzjFOA5FU2ANGStx8LBH+AKXScwfkx67C05riuaRR5adSCLMSEbVvUscH0nF+6A==", "requires": { - "@algolia/cache-common": "4.9.0", - "@algolia/logger-common": "4.9.0", - "@algolia/requester-common": "4.9.0" + "@algolia/cache-common": "4.9.1", + "@algolia/logger-common": "4.9.1", + "@algolia/requester-common": "4.9.1" } }, "@aws-crypto/ie11-detection": { @@ -21630,9 +21626,9 @@ } }, "@hapi/accept": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@hapi/accept/-/accept-5.0.1.tgz", - "integrity": "sha512-fMr4d7zLzsAXo28PRRQPXR1o2Wmu+6z+VY1UzDp0iFo13Twj8WePakwXBiqn3E1aAlTpSNzCXdnnQXFhst8h8Q==", + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@hapi/accept/-/accept-5.0.2.tgz", + "integrity": "sha512-CmzBx/bXUR8451fnZRuZAJRlzgm0Jgu5dltTX/bszmR2lheb9BpyN47Q1RbaGTsvFzn0PXAEs+lXDKfshccYZw==", "requires": { "@hapi/boom": "9.x.x", "@hapi/hoek": "9.x.x" @@ -21647,9 +21643,9 @@ } }, "@hapi/hoek": { - "version": "9.1.1", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.1.1.tgz", - "integrity": "sha512-CAEbWH7OIur6jEOzaai83jq3FmKmv4PmX1JYfs9IrYcGEVI/lyL1EXJGCj7eFVJ0bg5QR8LMxBlEtA+xKiLpFw==" + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.0.tgz", + "integrity": "sha512-sqKVVVOe5ivCaXDWivIJYVSaEgdQK9ul7a4Kity5Iw7u9+wBAPbX1RMSnLLmp7O4Vzj0WOWwMAJsTL00xwaNug==" }, "@hashicorp/js-utils": { "version": "1.0.10", @@ -21667,9 +21663,9 @@ "integrity": "sha512-AvRPKxi6bEzjS6U8dZR9FGjPmlqxxTdzPeFZXYTsc8kUGjoY/X9/GXY2zYCpzEpmUdnLzOSJrDl17INPEeL4Pw==" }, "@hashicorp/mktg-global-styles": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@hashicorp/mktg-global-styles/-/mktg-global-styles-3.0.1.tgz", - "integrity": "sha512-k87utsbGPHRBJgaeBMq0HlsoIEHaMn6eVvLRC7mxrxteMCyTg09YVVfihHABozyquK2Z2VnzTeH2dlkcJMNYFQ==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/mktg-global-styles/-/mktg-global-styles-3.0.2.tgz", + "integrity": "sha512-WeeiM//oEkDtnhCXkTNkWXPXGdeEqgDiCxz7p9w4619mkHWCuOOZWQSMMP/GV6i6knBZIzSgCT6QPeZwJNg9nA==" }, "@hashicorp/mktg-logos": { "version": "1.0.2", @@ -21677,9 +21673,9 @@ "integrity": "sha512-mZyJ3xG1YTufyDLC2vWFDfj6ppXJ8uK1z5+U/9fgcuJynet5STtEpeVsyZz3oTNcXJiCjAQzvK63T0V8npnKYQ==" }, "@hashicorp/nextjs-scripts": { - "version": "18.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/nextjs-scripts/-/nextjs-scripts-18.1.0.tgz", - "integrity": "sha512-NC1u7R1RGfOPB5XZd6MZSX3YkoCkT1BTHiBKwTOZieARPlEeDHdYiJ18fhgIYQLBc0DZ7u10JyKpQHvd15yKdA==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/@hashicorp/nextjs-scripts/-/nextjs-scripts-18.3.1.tgz", + "integrity": "sha512-5Y0xL5bVXQ6BC4XXYpQVu6yUGZMC4d0GxvuQSiR6f2/rYBJP/8GgBCiAJ8Z9JDQINb6413WdaYc0KzE+M9WdyA==", "requires": { "@bugsnag/js": "7.5.4", "@bugsnag/plugin-react": "7.5.4", @@ -21837,9 +21833,9 @@ } }, "@hashicorp/react-case-study-slider": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-case-study-slider/-/react-case-study-slider-6.0.0.tgz", - "integrity": 
"sha512-mW+572e0FB0jr10+N3rrXJJq1w+mc91U2a+RCTEjOytNEfghYk+hOAvqHFciq4lqdl+sYwZQ3dRhHSwflyGJkA==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-case-study-slider/-/react-case-study-slider-6.0.2.tgz", + "integrity": "sha512-x81xrDRV2J6B+e2C7eTEQ936CC1e337EclgF6N8+HLgDQ/6pCsl6+tbhd+f38mpsOc62vEPkhK9HsqGlmMJ1NQ==", "requires": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-image": "^2.0.3" @@ -21882,17 +21878,18 @@ "integrity": "sha512-SM0vGm3BoYI1OOUvRz5NOP8qgLAJII/Idil6N6xJcUacKbnw/sRyXFRLa6QAmPiqFxKrb2BNSFcVtf1RR3kvXQ==" }, "@hashicorp/react-docs-page": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-page/-/react-docs-page-13.2.0.tgz", - "integrity": "sha512-FHB+J43ZOstihscSBbXF++WJIMHkWLY3HQebOmIP059vfIuMAyDJ0D3UbZL7BkUi24ZU3oznisGHD39J2bLdug==", + "version": "13.3.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-page/-/react-docs-page-13.3.1.tgz", + "integrity": "sha512-zh+3aJ1JMb1sl03/0+CEzctjkx3RdfCPYdzMdqMlnrk7JwWws1q4Df9wkJNHpnKbALqzz8NXwSXC+HHnil9AAg==", "requires": { "@hashicorp/react-alert": "^5.0.0", "@hashicorp/react-content": "^7.0.1", - "@hashicorp/react-docs-sidenav": "^8.1.0", + "@hashicorp/react-docs-sidenav": "^8.2.0", "@hashicorp/react-head": "^3.0.2", "@hashicorp/react-placeholder": "^0.1.0", - "@hashicorp/react-search": "^5.0.2", + "@hashicorp/react-search": "^5.1.0", "@hashicorp/versioned-docs": "^0.0.13", + "classnames": "^2.2.6", "fs-exists-sync": "0.1.0", "gray-matter": "4.0.2", "js-yaml": "3.14.0", @@ -21912,9 +21909,9 @@ } }, "@hashicorp/react-docs-sidenav": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-sidenav/-/react-docs-sidenav-8.1.0.tgz", - "integrity": "sha512-TN6D/FKbDGCpLrBv6wO9U4Ad0GtL+4HEJpPMEchTannElkMhq67nV2x6cl4+HzspFsD07dlaYIDUpyASUV+wew==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/@hashicorp/react-docs-sidenav/-/react-docs-sidenav-8.2.0.tgz", + "integrity": "sha512-VoXJFuMF0bZkiEpkpTgzo5fz8nyVjTFaiHYFe1RgaM6EWKgVtT8Vjtr5oUJ9gDOf2fMLc+AgiQ0PbTeMWX662g==", "requires": { "@hashicorp/react-link-wrap": "^3.0.1", "fuzzysearch": "1.0.3" @@ -21929,9 +21926,9 @@ } }, "@hashicorp/react-featured-slider": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-featured-slider/-/react-featured-slider-4.0.0.tgz", - "integrity": "sha512-WcblcrlZ8KoLqyku8bBljY/9SzaBYyB/tae937iaMFZgUTvl865WGDoMADxZN91qBiQzfiljJXi5nyTKzLXMkQ==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-featured-slider/-/react-featured-slider-4.0.2.tgz", + "integrity": "sha512-jTFUAhH22klI3rU0ZMrMvkalsTsK2lTN7Ct1A/BQRMnR+Hw7r0jD4hjbMFWJm/QjF8quPcNxI/uFS/GrTO9p2g==", "requires": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-image": "^2.0.3" @@ -21975,11 +21972,11 @@ "integrity": "sha512-kKY/5XwWkBsDSvF8jHgNnxG4hx8ryPjoEtPFxPMVCly/ouwbslilIrWzqSYbeP7vUO686JzBLf5xkwq+HV0aig==" }, "@hashicorp/react-hero": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@hashicorp/react-hero/-/react-hero-7.1.1.tgz", - "integrity": "sha512-MYMduvDLsGSaw8gFpqcgxhdkC3ZXMs5mm4YlRjWw+MWv6lXJ9wzHss387s2qiqnxwQuQ5O2Ec2zraTH4pqf0gg==", + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-hero/-/react-hero-7.2.1.tgz", + "integrity": "sha512-+G4JTrvQm57Qp5fBQgA1X2RFKhFNDatJH2biTBgz27TLB00/g12XduTFA8GqMbVIluCBhrUdnxtpxgk8Fud+Pw==", "requires": { - "@hashicorp/js-utils": "^1.0.8-alpha.0", + "@hashicorp/js-utils": "next", 
"@hashicorp/localstorage-polyfill": "^1.0.14", "@hashicorp/react-alert": "^2.0.3", "@hashicorp/react-button": "^5.0.1", @@ -22077,9 +22074,9 @@ "requires": {} }, "@hashicorp/react-product-downloads-page": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-product-downloads-page/-/react-product-downloads-page-2.0.2.tgz", - "integrity": "sha512-Ff7kki+A6FqP7/WHa5AA7vRtdBJZ5CHYy33RQC7Hfl5GCYN/bicR0RSiRDaSSGLwtoIGXnNIm+qpj3txixrvtw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@hashicorp/react-product-downloads-page/-/react-product-downloads-page-2.0.3.tgz", + "integrity": "sha512-9H56cG5EzteBFvZH6IBzB6pMwK8dFdsin4ajDOROQE4d20kCfG1tCB5OGsZHNwn7HSxJ1fVnbyjc1mDkj5Piqg==", "requires": { "@hashicorp/react-button": "^5.0.1", "@hashicorp/react-head": "^3.0.2", @@ -22088,9 +22085,9 @@ } }, "@hashicorp/react-search": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-search/-/react-search-5.0.2.tgz", - "integrity": "sha512-YcvzgkWOnF5vXFaA86OVKDptP0s0UflRJl18rYcyAhkCzMO8bp2Gt5bvLq7LNq4NGRi+3G0dIv+X04UdQVXxaQ==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hashicorp/react-search/-/react-search-5.1.0.tgz", + "integrity": "sha512-Lrs3eDTwro8r+Yp4ow2orWaHKy00Z0THWg9As3KpDtijJzEAPRgWEP9nDxUr+bcOE4InfrJnC3GQ1BeZKIeU+A==", "requires": { "@hashicorp/react-inline-svg": "^1.0.2", "@hashicorp/remark-plugins": "^3.0.0", @@ -22107,7 +22104,8 @@ "@hashicorp/react-inline-svg": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@hashicorp/react-inline-svg/-/react-inline-svg-1.0.2.tgz", - "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==" + "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==", + "requires": {} } } }, @@ -22129,9 +22127,9 @@ } }, "@hashicorp/react-subnav": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@hashicorp/react-subnav/-/react-subnav-8.1.0.tgz", - "integrity": "sha512-0Kpkkb2nmqnCUgtq+h6ju2iOn/VTVemIpAQMpjRtxUFFGRUV1FAnks7enlPE6NIXVmREg0ZD/6CdnesG0IL6dA==", + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/@hashicorp/react-subnav/-/react-subnav-8.2.1.tgz", + "integrity": "sha512-rnDPYK5l46IcoEut7dD7VDdpW8vz4JbINC/4FxvEAPd8i1Rv+vtfWsJjG5fmyXRgTbLQjVvkyqrRJ3sgh50mmA==", "requires": { "@hashicorp/mktg-logos": "^1.0.1", "@hashicorp/react-button": "^5.0.1", @@ -22475,19 +22473,19 @@ } }, "@next/env": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/env/-/env-10.1.3.tgz", - "integrity": "sha512-q7z7NvmRs66lCQmVJtKjDxVtMTjSwP6ExVzaH46pbTH60MHgzEJ9H4jXrFLTihPmCIvpAv6Ai04jbS8dcg1ZMQ==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/env/-/env-10.2.2.tgz", + "integrity": "sha512-m0xOpl4F9z7R7Yt2OtJoo6ZUsFPdx+kuvZeoctH7T6lO66DmZL3W6MQDxso/ArkH8VOlDPZgeQVVBPf+I7wflA==" }, "@next/polyfill-module": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/polyfill-module/-/polyfill-module-10.1.3.tgz", - "integrity": "sha512-1DtUVcuoBJAn5IrxIZQjUG1KTPkiXMYloykPSkRxawimgvG9dRj2kscU+4KGNSFxHoxW9c68VRCb+7MDz5aGGw==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/polyfill-module/-/polyfill-module-10.2.2.tgz", + "integrity": "sha512-0t5Hw1Dr18TWP65qAnakRa8+jza6SAFOz0b2v67s5AVquAwXXlclR4SfUy3ahrRtjCqlbLEE/oFIzCGbyMYfVA==" }, "@next/react-dev-overlay": { - "version": "10.1.3", - "resolved": 
"https://registry.npmjs.org/@next/react-dev-overlay/-/react-dev-overlay-10.1.3.tgz", - "integrity": "sha512-vIgUah3bR9+MKzwU1Ni5ONfYM0VdI42i7jZ+Ei1c0wjwkG9anVnDqhSQ3mVg62GP2nt7ExaaFyf9THbsw5KYXg==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/react-dev-overlay/-/react-dev-overlay-10.2.2.tgz", + "integrity": "sha512-uPslFPWvvZ8AdadGdK2/834UnJy6F+7071/ere6QpN88Ngzqx9lDIhjslEeFLRtpyBst4s1YUdbm69btVPdE5w==", "requires": { "@babel/code-frame": "7.12.11", "anser": "1.4.9", @@ -22535,9 +22533,10 @@ } }, "@next/react-refresh-utils": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/@next/react-refresh-utils/-/react-refresh-utils-10.1.3.tgz", - "integrity": "sha512-P4GJZuLKfD/o42JvGZ/xP4Hxg68vd3NeZxOLqIuQKFjjaYgC2IrO+lE5PTwGmRkytjfprJC+9j7Jss/xQAS6QA==" + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/@next/react-refresh-utils/-/react-refresh-utils-10.2.2.tgz", + "integrity": "sha512-OL7r0iz+SiE9SMWcxZocUtEAHv0/TlBWxIE3KjjO1vWSU1r0gMrE2l2RxHfMLIPsl6CjAkcPxoaXlosFsJ2S5w==", + "requires": {} }, "@nodelib/fs.scandir": { "version": "2.1.4", @@ -22707,9 +22706,9 @@ "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" }, "@types/react": { - "version": "17.0.3", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.3.tgz", - "integrity": "sha512-wYOUxIgs2HZZ0ACNiIayItyluADNbONl7kt8lkLjVK8IitMH5QMyAh75Fwhmo37r1m7L2JaFj03sIfxBVDvRAg==", + "version": "17.0.6", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.6.tgz", + "integrity": "sha512-u/TtPoF/hrvb63LdukET6ncaplYsvCvmkceasx8oG84/ZCsoLxz9Z/raPBP4lTAiWW1Jb889Y9svHmv8R26dWw==", "dev": true, "requires": { "@types/prop-types": "*", @@ -22859,24 +22858,24 @@ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" }, "algoliasearch": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.0.tgz", - "integrity": "sha512-hhlza8j/uCWGe2kSz89HlcexiLxO1wzOKLNPWivNtZeZO5J85agbcMsrKV5+xLFI4LbulP/b/4/IvswxzPrGIw==", - "requires": { - "@algolia/cache-browser-local-storage": "4.9.0", - "@algolia/cache-common": "4.9.0", - "@algolia/cache-in-memory": "4.9.0", - "@algolia/client-account": "4.9.0", - "@algolia/client-analytics": "4.9.0", - "@algolia/client-common": "4.9.0", - "@algolia/client-recommendation": "4.9.0", - "@algolia/client-search": "4.9.0", - "@algolia/logger-common": "4.9.0", - "@algolia/logger-console": "4.9.0", - "@algolia/requester-browser-xhr": "4.9.0", - "@algolia/requester-common": "4.9.0", - "@algolia/requester-node-http": "4.9.0", - "@algolia/transporter": "4.9.0" + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.1.tgz", + "integrity": "sha512-EeJUYXzBEhZSsL6tXc3hseLBCtlNLa1MZ4mlMK6EeX38yRjY5vgnFcNNml6uUhlOjvheKxgkKRpPWkxgL8Cqkg==", + "requires": { + "@algolia/cache-browser-local-storage": "4.9.1", + "@algolia/cache-common": "4.9.1", + "@algolia/cache-in-memory": "4.9.1", + "@algolia/client-account": "4.9.1", + "@algolia/client-analytics": "4.9.1", + "@algolia/client-common": "4.9.1", + "@algolia/client-recommendation": "4.9.1", + "@algolia/client-search": "4.9.1", + "@algolia/logger-common": "4.9.1", + "@algolia/logger-console": "4.9.1", + "@algolia/requester-browser-xhr": "4.9.1", + "@algolia/requester-common": "4.9.1", + "@algolia/requester-node-http": "4.9.1", + "@algolia/transporter": "4.9.1" } }, "algoliasearch-helper": { @@ -23976,15 +23975,15 @@ } }, 
"browserslist": { - "version": "4.16.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.1.tgz", - "integrity": "sha512-UXhDrwqsNcpTYJBTZsbGATDxZbiVDsx6UjpmRUmtnP10pr8wAYr5LgFoEFw9ixriQH2mv/NX2SfGzE/o8GndLA==", + "version": "4.16.6", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", + "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", "requires": { - "caniuse-lite": "^1.0.30001173", - "colorette": "^1.2.1", - "electron-to-chromium": "^1.3.634", + "caniuse-lite": "^1.0.30001219", + "colorette": "^1.2.2", + "electron-to-chromium": "^1.3.723", "escalade": "^3.1.1", - "node-releases": "^1.1.69" + "node-releases": "^1.1.71" } }, "buffer": { @@ -24128,9 +24127,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": "sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" + "version": "1.0.30001228", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", + "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==" }, "caw": { "version": "2.0.1", @@ -24317,6 +24316,12 @@ "restore-cursor": "^3.1.0" } }, + "cli-spinners": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.6.0.tgz", + "integrity": "sha512-t+4/y50K/+4xcCRosKkA7W4gTr1MySvLV0q+PxmG7FJ5g+66ChKurYjxBCjHggHH3HA5Hh9cy+lcUGWDqVH+4Q==", + "dev": true + }, "cli-truncate": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", @@ -24342,6 +24347,12 @@ "tiny-emitter": "^2.0.0" } }, + "clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=", + "dev": true + }, "clone-regexp": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clone-regexp/-/clone-regexp-2.2.0.tgz", @@ -25000,163 +25011,19 @@ "integrity": "sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg==" }, "cssnano-preset-simple": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-simple/-/cssnano-preset-simple-1.2.2.tgz", - "integrity": "sha512-gtvrcRSGtP3hA/wS8mFVinFnQdEsEpm3v4I/s/KmNjpdWaThV/4E5EojAzFXxyT5OCSRPLlHR9iQexAqKHlhGQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cssnano-preset-simple/-/cssnano-preset-simple-2.0.0.tgz", + "integrity": "sha512-HkufSLkaBJbKBFx/7aj5HmCK9Ni/JedRQm0mT2qBzMG/dEuJOLnMt2lK6K1rwOOyV4j9aSY+knbW9WoS7BYpzg==", "requires": { - "caniuse-lite": "^1.0.30001179", - "postcss": "^7.0.32" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "dependencies": { - "supports-color": { - "version": "5.5.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } - } + "caniuse-lite": "^1.0.30001202" } }, "cssnano-simple": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cssnano-simple/-/cssnano-simple-1.2.2.tgz", - "integrity": "sha512-4slyYc1w4JhSbhVX5xi9G0aQ42JnRyPg+7l7cqoNyoIDzfWx40Rq3JQZnoAWDu60A4AvKVp9ln/YSUOdhDX68g==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cssnano-simple/-/cssnano-simple-2.0.0.tgz", + "integrity": "sha512-0G3TXaFxlh/szPEG/o3VcmCwl0N3E60XNb9YZZijew5eIs6fLjJuOPxQd9yEBaX2p/YfJtt49i4vYi38iH6/6w==", "requires": { - "cssnano-preset-simple": "1.2.2", - "postcss": "^7.0.32" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "dependencies": { - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - 
}, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } - } + "cssnano-preset-simple": "^2.0.0" } }, "csso": { @@ -25380,6 +25247,15 @@ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz", "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==" }, + "defaults": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", + "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", + "dev": true, + "requires": { + "clone": "^1.0.2" + } + }, "define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", @@ -25550,9 +25426,9 @@ } }, "electron-to-chromium": { - "version": "1.3.712", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.712.tgz", - "integrity": "sha512-3kRVibBeCM4vsgoHHGKHmPocLqtFAGTrebXxxtgKs87hNUzXrX2NuS3jnBys7IozCnw7viQlozxKkmty2KNfrw==" + "version": "1.3.735", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.735.tgz", + "integrity": "sha512-cp7MWzC3NseUJV2FJFgaiesdrS+A8ZUjX5fLAxdRlcaPDkaPGFplX930S5vf84yqDp4LjuLdKouWuVOTwUfqHQ==" }, "elliptic": { "version": "6.5.4", @@ -27643,24 +27519,37 @@ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" }, "inquirer": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.0.0.tgz", - "integrity": "sha512-ON8pEJPPCdyjxj+cxsYRe6XfCJepTxANdNnTebsTuQgXpRyZRRT9t4dJwjRubgmvn20CLSEnozRUayXyM9VTXA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.1.0.tgz", + "integrity": "sha512-1nKYPoalt1vMBfCMtpomsUc32wmOoWXAoq3kM/5iTfxyQ2f/BxjixQpC+mbZ7BI0JUXHED4/XPXekDVtJNpXYw==", "dev": true, "requires": { "ansi-escapes": "^4.2.1", - "chalk": "^4.1.0", + "chalk": "^4.1.1", "cli-cursor": "^3.1.0", "cli-width": "^3.0.0", "external-editor": "^3.0.3", "figures": "^3.0.0", "lodash": "^4.17.21", "mute-stream": "0.0.8", + "ora": "^5.3.0", "run-async": "^2.4.0", "rxjs": "^6.6.6", "string-width": "^4.1.0", "strip-ansi": "^6.0.0", "through": "^2.3.6" + }, + "dependencies": { + "chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": 
"sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + } } }, "internal-slot": { @@ -27847,6 +27736,12 @@ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==" }, + "is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true + }, "is-jpg": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-jpg/-/is-jpg-2.0.0.tgz", @@ -28034,14 +27929,6 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "requires": { - "isarray": "1.0.0" - } - }, "isomorphic-fetch": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz", @@ -28303,15 +28190,6 @@ "type-check": "~0.4.0" } }, - "line-column": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/line-column/-/line-column-1.0.2.tgz", - "integrity": "sha1-0lryk2tvSEkXKzEuR5LR2Ye8NKI=", - "requires": { - "isarray": "^1.0.0", - "isobject": "^2.0.0" - } - }, "line-reader": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/line-reader/-/line-reader-0.4.0.tgz", @@ -28957,9 +28835,9 @@ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" }, "nanoid": { - "version": "3.1.22", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.22.tgz", - "integrity": "sha512-/2ZUaJX2ANuLtTvqTlgqBQNJoQO398KyJgZloL0PZkC0dpysjncRUPsFe3DUPzz/y3h+u7C46np8RMuvF3jsSQ==" + "version": "3.1.23", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz", + "integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==" }, "nanomatch": { "version": "1.2.13", @@ -29017,28 +28895,28 @@ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==" }, "next": { - "version": "10.1.3", - "resolved": "https://registry.npmjs.org/next/-/next-10.1.3.tgz", - "integrity": "sha512-8Jf38F+s0YcXXkJGF5iUxOqSmbHrey0fX5Epc43L0uwDKmN2jK9vhc2ihCwXC1pmu8d2m/8wfTiXRJKGti55yw==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/next/-/next-10.2.2.tgz", + "integrity": "sha512-HPGSLrflWPvf3zEZSIk/uj0CZ+YYrpZwZS0PFAgXbEwb894iRuAPzglagqlzcCh7lg12RBEaKNIxhrVa5xgjtQ==", "requires": { "@babel/runtime": "7.12.5", - "@hapi/accept": "5.0.1", - "@next/env": "10.1.3", - "@next/polyfill-module": "10.1.3", - "@next/react-dev-overlay": "10.1.3", - "@next/react-refresh-utils": "10.1.3", + "@hapi/accept": "5.0.2", + "@next/env": "10.2.2", + "@next/polyfill-module": "10.2.2", + "@next/react-dev-overlay": "10.2.2", + "@next/react-refresh-utils": "10.2.2", "@opentelemetry/api": "0.14.0", "assert": "2.0.0", "ast-types": "0.13.2", "browserify-zlib": "0.2.0", - "browserslist": "4.16.1", + "browserslist": "4.16.6", "buffer": "5.6.0", - "caniuse-lite": "^1.0.30001179", + "caniuse-lite": "^1.0.30001228", "chalk": "2.4.2", "chokidar": "3.5.1", "constants-browserify": 
"1.0.0", "crypto-browserify": "3.12.0", - "cssnano-simple": "1.2.2", + "cssnano-simple": "2.0.0", "domain-browser": "4.19.0", "encoding": "0.1.13", "etag": "1.8.1", @@ -29054,7 +28932,7 @@ "p-limit": "3.1.0", "path-browserify": "1.0.1", "pnp-webpack-plugin": "1.6.4", - "postcss": "8.1.7", + "postcss": "8.2.13", "process": "0.11.10", "prop-types": "15.7.2", "querystring-es3": "0.2.1", @@ -29133,9 +29011,9 @@ } }, "next-mdx-remote": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/next-mdx-remote/-/next-mdx-remote-3.0.1.tgz", - "integrity": "sha512-sV1sM6CkdYP5aPND1+vrF3wr8TU8NJwVlcFe2rPjVHR5J/9M2bl9zlhF6AF+GOKHA7d5kUdwHoLbApEGofD8hA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/next-mdx-remote/-/next-mdx-remote-3.0.2.tgz", + "integrity": "sha512-imLrrw6c/Hi1BKRyJE9yLhk45N5zzw+/CxSQRHOAByYUSOhd+DNjUaqm2jtd+/GgxJeYB1d3fJPXmvUZTez1MQ==", "requires": { "@mdx-js/mdx": "^1.6.22", "@mdx-js/react": "^1.6.22", @@ -29738,6 +29616,36 @@ "logalot": "^2.0.0" } }, + "ora": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.0.tgz", + "integrity": "sha512-1StwyXQGoU6gdjYkyVcqOLnVlbKj+6yPNNOxJVgpt9t4eksKjiriiHuxktLYkgllwk+D6MbC4ihH84L1udRXPg==", + "dev": true, + "requires": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "dependencies": { + "bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "requires": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + } + } + }, "os-browserify": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", @@ -30072,13 +29980,12 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "8.1.7", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.1.7.tgz", - "integrity": "sha512-llCQW1Pz4MOPwbZLmOddGM9eIJ8Bh7SZ2Oj5sxZva77uVaotYDsYTch1WBTNu7fUY0fpWp0fdt7uW40D4sRiiQ==", + "version": "8.2.13", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.2.13.tgz", + "integrity": "sha512-FCE5xLH+hjbzRdpbRb1IMCvPv9yZx2QnDarBEYSN0N0HYk+TcXsEhwdFcFb+SRWOKzKGErhIEbBK2ogyLdTtfQ==", "requires": { - "colorette": "^1.2.1", - "line-column": "^1.0.2", - "nanoid": "^3.1.16", + "colorette": "^1.2.2", + "nanoid": "^3.1.22", "source-map": "^0.6.1" }, "dependencies": { @@ -33291,9 +33198,9 @@ "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" }, "prettier": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.2.1.tgz", - "integrity": "sha512-PqyhM2yCjg/oKkFPtTGUojv7gnZAoG80ttl45O6x2Ug/rMJw4wcc9k6aaf2hibP7BGVCCM33gZoGjyvt9mm16Q==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.3.0.tgz", + "integrity": "sha512-kXtO4s0Lz/DW/IJ9QdWhAf7/NmPWQXkFr/r/WkR3vyI+0v8amTDxiaQSLzs8NBlytfLWX/7uQUMIW677yLKl4w==", "dev": true }, "prettier-linter-helpers": { @@ -33534,9 +33441,9 @@ "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==" }, "react-instantsearch-core": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-6.10.3.tgz", - "integrity": 
"sha512-7twp3OJrPGTFpyXwjJNeOTbQw7RTv+0cUyKkXR9njEyLdXKcPWfpeBirXfdQHjYIHEY2b0V2Vom1B9IHSDSUtQ==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-6.11.0.tgz", + "integrity": "sha512-RE5pPuSL5e3+wgMl+QLY+MgyqgNXBrfppqk3podOXTdWpQ5VdaXVrpeWVcmFJlRRKPLJI6RGqCiHVRTaGt1HVw==", "requires": { "@babel/runtime": "^7.1.2", "algoliasearch-helper": "^3.4.3", @@ -33552,16 +33459,16 @@ } }, "react-instantsearch-dom": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/react-instantsearch-dom/-/react-instantsearch-dom-6.10.3.tgz", - "integrity": "sha512-kxc6IEruxJrc7O9lsLV5o4YK/RkGt3l7D1Y51JfmYkgeLuQHApwgcy/TAIoSN7wfR/1DONFbX8Y5VhU9Wqh87Q==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-dom/-/react-instantsearch-dom-6.11.0.tgz", + "integrity": "sha512-OXbP3OuT52HAoCbQiPxPAL9q/Lm5pm+eRx8TMKB2kcf6fQzc+L7RMy7fveuMENcmBks+vdD2+G7DGM8oVv9etg==", "requires": { "@babel/runtime": "^7.1.2", "algoliasearch-helper": "^3.4.3", "classnames": "^2.2.5", "prop-types": "^15.6.2", "react-fast-compare": "^3.0.0", - "react-instantsearch-core": "^6.10.3" + "react-instantsearch-core": "^6.11.0" }, "dependencies": { "react-fast-compare": { @@ -34142,9 +34049,9 @@ } }, "search-insights": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-1.7.1.tgz", - "integrity": "sha512-CSuSKIJp+WcSwYrD9GgIt1e3xmI85uyAefC4/KYGgtvNEm6rt4kBGilhVRmTJXxRE2W1JknvP598Q7SMhm7qKA==" + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-1.8.0.tgz", + "integrity": "sha512-4sd6oS/sLH/UxiZ4vMoDbcpJP01pcoNI4mm3ZsUfDAMCPKxDda1R8SFZUv618og3NYBvvWvwmf8VRC0rNYuTkg==" }, "section-matter": { "version": "1.0.0", @@ -34736,13 +34643,6 @@ "integrity": "sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg==", "requires": { "type-fest": "^0.7.1" - }, - "dependencies": { - "type-fest": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.7.1.tgz", - "integrity": "sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==" - } } }, "state-toggle": { @@ -36211,6 +36111,11 @@ "prelude-ls": "^1.2.1" } }, + "type-fest": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.7.1.tgz", + "integrity": "sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==" + }, "type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -36647,6 +36552,15 @@ "graceful-fs": "^4.1.2" } }, + "wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", + "dev": true, + "requires": { + "defaults": "^1.0.3" + } + }, "web-namespaces": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz", diff --git a/website/package.json b/website/package.json index afd2d6b458fdc..66949548b080a 100644 --- a/website/package.json +++ b/website/package.json @@ -4,43 +4,43 @@ "version": "1.0.0", "author": "HashiCorp", "dependencies": { - "@hashicorp/mktg-global-styles": "3.0.1", - "@hashicorp/nextjs-scripts": "18.1.0", + "@hashicorp/mktg-global-styles": "3.0.2", + "@hashicorp/nextjs-scripts": "18.3.1", "@hashicorp/react-alert-banner": "6.1.1", "@hashicorp/react-button": "5.0.1", - "@hashicorp/react-case-study-slider": 
"6.0.0", + "@hashicorp/react-case-study-slider": "6.0.2", "@hashicorp/react-code-block": "4.0.1", "@hashicorp/react-content": "7.0.1", - "@hashicorp/react-docs-page": "13.2.0", - "@hashicorp/react-featured-slider": "4.0.0", + "@hashicorp/react-docs-page": "13.3.1", + "@hashicorp/react-featured-slider": "4.0.2", "@hashicorp/react-hashi-stack-menu": "2.0.3", "@hashicorp/react-head": "3.0.2", - "@hashicorp/react-hero": "7.1.1", + "@hashicorp/react-hero": "7.2.1", "@hashicorp/react-image": "4.0.1", "@hashicorp/react-inline-svg": "6.0.1", "@hashicorp/react-logo-grid": "4.0.1", "@hashicorp/react-markdown-page": "1.2.0", - "@hashicorp/react-product-downloads-page": "2.0.2", + "@hashicorp/react-product-downloads-page": "2.0.3", "@hashicorp/react-section-header": "5.0.2", - "@hashicorp/react-subnav": "8.1.0", + "@hashicorp/react-subnav": "8.2.1", "@hashicorp/react-tabs": "6.0.1", "@hashicorp/react-text-split": "3.1.1", "@hashicorp/react-text-splits": "2.1.1", "@hashicorp/react-use-cases": "3.0.2", "@hashicorp/react-vertical-text-block-list": "6.0.2", - "next": "10.1.3", - "next-mdx-remote": "3.0.1", + "next": "10.2.2", + "next-mdx-remote": "3.0.2", "next-remote-watch": "1.0.0", "react": "16.14.0", "react-dom": "16.14.0", "tippy.js": "4.0.0" }, "devDependencies": { - "@types/react": "^17.0.3", + "@types/react": "^17.0.6", "dart-linkcheck": "^2.0.15", "husky": "^4.3.8", - "inquirer": "^8.0.0", - "prettier": "^2.2.1" + "inquirer": "^8.1.0", + "prettier": "^2.3.0" }, "husky": { "hooks": { From 2fe4bda547c02739ed647eaddedbf5bdf0a4959f Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Fri, 21 May 2021 19:21:11 -0700 Subject: [PATCH 046/101] Initial Diagnose CLI output (#11583) * Create helpers which integrate with OpenTelemetry for diagnose collection * Go mod vendor * Comments * Update vault/diagnose/helpers.go Co-authored-by: swayne275 * Add unit test/example * tweak output * More comments * add spot check concept * Get unit tests working on Result structs * wip * Fix unit test * Get unit tests working, and make diagnose sessions local rather than global * Comments * Last comments * No need for init * :| * Fix helpers_test * wip * wip * wip * Revendor otel * Fix merge related problems * imports * Fix unit tests Co-authored-by: swayne275 --- command/operator_diagnose.go | 43 +++++++++-- command/operator_diagnose_test.go | 3 +- go.sum | 64 ---------------- vault/diagnose/helpers.go | 25 +++--- vault/diagnose/helpers_test.go | 4 +- vault/diagnose/output.go | 122 +++++++++++++++++++++++------- 6 files changed, 148 insertions(+), 113 deletions(-) diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index a395c7706d8e6..47fa7b5c904d1 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -2,9 +2,13 @@ package command import ( "context" + "encoding/json" + "fmt" + "os" "strings" "sync" + "github.com/docker/docker/pkg/ioutils" "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/internalshared/listenerutil" @@ -95,6 +99,12 @@ func (c *OperatorDiagnoseCommand) Flags() *FlagSets { Default: false, Usage: "Dump all information collected by Diagnose.", }) + + f.StringVar(&StringVar{ + Name: "format", + Target: &c.flagFormat, + Usage: "The output format", + }) return set } @@ -130,10 +140,31 @@ func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int { return 1 } + if c.diagnose == nil { + if c.flagFormat == "json" { + c.diagnose = diagnose.New(&ioutils.NopWriter{}) + } else { + 
c.UI.Output(version.GetVersion().FullVersionNumber(true)) + c.diagnose = diagnose.New(os.Stdout) + } + } c.UI.Output(version.GetVersion().FullVersionNumber(true)) ctx := diagnose.Context(context.Background(), c.diagnose) - err := c.offlineDiagnostics(ctx) c.diagnose.SetSkipList(c.flagSkips) + err := c.offlineDiagnostics(ctx) + + results := c.diagnose.Finalize(ctx) + if c.flagFormat == "json" { + resultsJS, err := json.MarshalIndent(results, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "error marshalling results: %v", err) + return 2 + } + c.UI.Output(string(resultsJS)) + } else { + c.UI.Output("\nResults:") + results.Write(os.Stdout) + } if err != nil { return 1 @@ -165,7 +196,6 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error ctx, span := diagnose.StartSpan(ctx, "initialization") defer span.End() - server.flagConfigs = c.flagConfigs config, err := server.parseConfig() if err != nil { @@ -269,7 +299,7 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error return err } - return diagnose.Test(ctx, "service-discovery", func(ctx context.Context) error { + diagnose.Test(ctx, "service-discovery", func(ctx context.Context) error { srConfig := config.ServiceRegistration.Config // Initialize the Service Discovery, if there is one if config.ServiceRegistration != nil && config.ServiceRegistration.Type == "consul" { @@ -281,11 +311,10 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error // SetupSecureTLS for service discovery uses the same cert and key to set up physical // storage. See the consul package in physical for details. - err = srconsul.SetupSecureTLS(api.DefaultConfig(), srConfig, server.logger, true) - if err != nil { - return err - } + return srconsul.SetupSecureTLS(api.DefaultConfig(), srConfig, server.logger, true) } return nil }) + + return nil } diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go index 42f58e65a6ed7..c4a2a7228ceb9 100644 --- a/command/operator_diagnose_test.go +++ b/command/operator_diagnose_test.go @@ -5,6 +5,7 @@ package command import ( "context" "fmt" + "io/ioutil" "strings" "testing" @@ -17,7 +18,7 @@ func testOperatorDiagnoseCommand(tb testing.TB) *OperatorDiagnoseCommand { ui := cli.NewMockUi() return &OperatorDiagnoseCommand{ - diagnose: diagnose.New(), + diagnose: diagnose.New(ioutil.Discard), BaseCommand: &BaseCommand{ UI: ui, }, diff --git a/go.sum b/go.sum index 22e535106d12d..c0c0264982fbf 100644 --- a/go.sum +++ b/go.sum @@ -17,15 +17,12 @@ cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbf cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.6.0 h1:ajp/DjpiCHO71SyIhwb83YsUGAyWuzVvMko+9xCsJLw= cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub 
v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/spanner v1.5.1 h1:dWyj10TLlaxH2No6+tXsSCaq9oWgrRbXy1N3x/bhMGU= cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= @@ -85,7 +82,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= @@ -138,7 +134,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/aerospike/aerospike-client-go v3.1.1+incompatible h1:+zAuvKMI9rq/hdpwX8srmFvDKfprMPX1SQGMLkBpvuc= github.com/aerospike/aerospike-client-go v3.1.1+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= @@ -184,7 +179,6 @@ github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/ github.com/aws/aws-sdk-go v1.37.19 h1:/xKHoSsYfH9qe16pJAHIjqTVpMM2DRSsEt8Ok1bzYiw= github.com/aws/aws-sdk-go v1.37.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -193,9 +187,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= -github.com/bmizerany/assert 
v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= @@ -231,14 +223,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= @@ -279,7 +268,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -298,7 +286,6 @@ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nb github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution 
v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -317,7 +304,6 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -347,7 +333,6 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= @@ -383,7 +368,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= @@ -453,7 +437,6 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -476,7 +459,6 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -500,7 +482,6 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -521,34 +502,26 @@ github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -562,7 +535,6 @@ github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.4.1-0.20200910203702-bb2b5dd871ca h1:DYR7hPxUqDQP4h3eX9/wI4J2yzL3QEsXi3TCXYtAgGI= github.com/hashicorp/consul/sdk v0.4.1-0.20200910203702-bb2b5dd871ca/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -575,7 +547,6 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 h1:UgODETBAoROFMSSVgg0v8vVpD9Tol8FtYcAeomcWJtY= github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= -github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= github.com/hashicorp/go-gcp-common v0.6.0 h1:m1X+DK003bj4WEjqOnv+Csepb3zpfN/bidboUeUSj68= @@ -651,7 +622,6 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8= github.com/hashicorp/nomad/api 
v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= @@ -706,8 +676,6 @@ github.com/hashicorp/vault-plugin-secrets-ad v0.9.0 h1:pmui4uYI7zol5yk5u0crqSClq github.com/hashicorp/vault-plugin-secrets-ad v0.9.0/go.mod h1:L5L6NoJFxRvgxhuA2sWhloc3sbgmE7KxhNcoRxcaH9U= github.com/hashicorp/vault-plugin-secrets-alicloud v0.8.0 h1:dg1vrZl+XwGipfjet7M9UqGM8nCDx4FNnN7zlqiHQWU= github.com/hashicorp/vault-plugin-secrets-alicloud v0.8.0/go.mod h1:SSkKpSTOMnX84PfgYiWHgwVg+YMhxHNjo+YCJGNBoZk= -github.com/hashicorp/vault-plugin-secrets-azure v0.9.0 h1:RfJsZ9DoOhJutvHx3KDSuCXZqZEWKyu3Pksa9F5fjYo= -github.com/hashicorp/vault-plugin-secrets-azure v0.9.0/go.mod h1:4jCVjTG809NCQ8mrSnbBtX17gX1Iush+558BVO6MJeo= github.com/hashicorp/vault-plugin-secrets-azure v0.9.1 h1:vZhWEafEedxLS7t906QSYFKT+jiNM6Mv6fDHxOX6O5I= github.com/hashicorp/vault-plugin-secrets-azure v0.9.1/go.mod h1:4jCVjTG809NCQ8mrSnbBtX17gX1Iush+558BVO6MJeo= github.com/hashicorp/vault-plugin-secrets-gcp v0.9.0 h1:gfaTe+QNNk+wZLec0k9pUt2VSBKPB237F/Dh0a1u8ic= @@ -727,7 +695,6 @@ github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35n github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huaweicloud/golangsdk v0.0.0-20200304081349-45ec0797f2a4/go.mod h1:WQBcHRNX9shz3928lWEvstQJtAtYI7ks6XlgtRT9Tcw= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= @@ -739,13 +706,11 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jcmturner/aescts v1.0.1 h1:5jhUSHbHSZjQeWFY//Lv8dpP/O3sMDOxrGV/IfCqh44= github.com/jcmturner/aescts v1.0.1/go.mod h1:k9gJoDUf1GH5r2IBtBjwjDCoLELYxOcEhitdP8RL7qQ= @@ -769,10 +734,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= @@ -790,7 +753,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -836,7 +798,6 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -909,7 +870,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA= github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg= @@ -930,7 +890,6 @@ github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 
h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= @@ -949,13 +908,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -989,13 +946,11 @@ github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4 github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest/v3 v3.6.2 h1:Q3Y8naCMyC1Nw91BHum1bGyEsNQc/UOIYS3ZoPoou0g= github.com/ory/dockertest/v3 v3.6.2/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE= -github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -1074,7 +1029,6 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1088,17 +1042,14 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCL github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sethvargo/go-limiter v0.3.0 h1:yRMc+Qs2yqw6YJp6UxrO2iUs6DOSq4zcnljbB7/rMns= github.com/sethvargo/go-limiter v0.3.0/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil v3.21.1+incompatible h1:2LwXWdbjXwyDgq26Yy/OT4xozlpmssQfy/rtfhWb0bY= github.com/shirou/gopsutil v3.21.1+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1110,11 +1061,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce 
h1:CGR1hXCOeoZ1aJhCs8qdKJuEu3xoZnxsLcYoh5Bnr+4= github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= @@ -1122,7 +1071,6 @@ github.com/snowflakedb/gosnowflake v1.3.11 h1:4VATaWPZv2HEh9bkZG5LaMux4WRiZJDu/P github.com/snowflakedb/gosnowflake v1.3.11/go.mod h1:+BMe9ivHWpzcXbM1qSIxWZ8qpWGBBaA46o9Z1qSfrNg= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1145,7 +1093,6 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= github.com/square/go-jose/v3 v3.0.0-20200225220504-708a9fe87ddc/go.mod h1:JbpHhNyeVc538vtj/ECJ3gPYm1VEitNjsLhm4eJQQbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1166,10 +1113,8 @@ github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYo github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible h1:K3fcS92NS8cRntIdu8Uqy2ZSePvX73nNhOkKuPGJLXQ= github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8= github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -1191,7 +1136,6 @@ github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= 
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yandex-cloud/go-genproto v0.0.0-20200722140432-762fe965ce77/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= @@ -1228,7 +1172,6 @@ go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= @@ -1245,7 +1188,6 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -1623,12 +1565,9 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUy gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -1650,7 +1589,6 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1664,9 +1602,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vault/diagnose/helpers.go b/vault/diagnose/helpers.go index ad41b421c00a9..a878d00e29c56 100644 --- a/vault/diagnose/helpers.go +++ b/vault/diagnose/helpers.go @@ -3,6 +3,7 @@ package diagnose import ( "context" "fmt" + "io" "time" "go.opentelemetry.io/otel/attribute" @@ -23,6 +24,10 @@ const ( messageKey = attribute.Key("message") ) +var ( + MainSection = trace.WithAttributes(attribute.Key("diagnose").String("main-section")) +) + var diagnoseSession = struct{}{} var noopTracer = trace.NewNoopTracerProvider().Tracer("vault-diagnose") @@ -38,8 +43,8 @@ type Session struct { // New initializes a Diagnose tracing session. In particular this wires a TelemetryCollector, which // synchronously receives and tracks OpenTelemetry spans in order to provide a tree structure of results // when the outermost span ends. -func New() *Session { - tc := NewTelemetryCollector() +func New(w io.Writer) *Session { + tc := NewTelemetryCollector(w) //so, _ := stdout.NewExporter(stdout.WithPrettyPrint()) tp := sdktrace.NewTracerProvider( sdktrace.WithSampler(sdktrace.AlwaysSample()), @@ -128,30 +133,30 @@ func Warn(ctx context.Context, msg string) { // SpotOk adds an Ok result without adding a new Span. This should be used for instantaneous checks with no // possible sub-spans -func SpotOk(ctx context.Context, checkName, message string) { - addSpotCheckResult(ctx, spotCheckOkEventName, checkName, message) +func SpotOk(ctx context.Context, checkName, message string, options ...trace.EventOption) { + addSpotCheckResult(ctx, spotCheckOkEventName, checkName, message, options...) } // SpotWarn adds a Warning result without adding a new Span. This should be used for instantaneous checks with no // possible sub-spans -func SpotWarn(ctx context.Context, checkName, message string) { - addSpotCheckResult(ctx, spotCheckWarnEventName, checkName, message) +func SpotWarn(ctx context.Context, checkName, message string, options ...trace.EventOption) { + addSpotCheckResult(ctx, spotCheckWarnEventName, checkName, message, options...) } // SpotError adds an Error result without adding a new Span. 
This should be used for instantaneous checks with no // possible sub-spans -func SpotError(ctx context.Context, checkName string, err error) error { +func SpotError(ctx context.Context, checkName string, err error, options ...trace.EventOption) error { var message string if err != nil { message = err.Error() } - addSpotCheckResult(ctx, spotCheckErrorEventName, checkName, message) + addSpotCheckResult(ctx, spotCheckErrorEventName, checkName, message, options...) return err } -func addSpotCheckResult(ctx context.Context, eventName, checkName, message string) { +func addSpotCheckResult(ctx context.Context, eventName, checkName, message string, options ...trace.EventOption) { span := trace.SpanFromContext(ctx) - attrs := []trace.EventOption{trace.WithAttributes(nameKey.String(checkName))} + attrs := append(options, trace.WithAttributes(nameKey.String(checkName))) if message != "" { attrs = append(attrs, trace.WithAttributes(messageKey.String(message))) } diff --git a/vault/diagnose/helpers_test.go b/vault/diagnose/helpers_test.go index ebe92f59797ee..9e3dc008528aa 100644 --- a/vault/diagnose/helpers_test.go +++ b/vault/diagnose/helpers_test.go @@ -13,7 +13,7 @@ import ( func TestDiagnoseOtelResults(t *testing.T) { expected := &Result{ Name: "make-coffee", - Status: WarningStatus, + Status: ErrorStatus, Warnings: []string{ "coffee getting low", }, @@ -37,7 +37,7 @@ func TestDiagnoseOtelResults(t *testing.T) { }, }, } - sess := New() + sess := New(os.Stdout) sess.SetSkipList([]string{"dispose-grounds"}) ctx := Context(context.Background(), sess) diff --git a/vault/diagnose/output.go b/vault/diagnose/output.go index 2fdad10ff2526..8ebfdbf505f61 100644 --- a/vault/diagnose/output.go +++ b/vault/diagnose/output.go @@ -3,6 +3,7 @@ package diagnose import ( "context" "errors" + "fmt" "io" "sort" "strings" @@ -17,36 +18,62 @@ import ( const ( status_unknown = "[ ] " status_ok = "\u001b[32m[ ok ]\u001b[0m " - status_failed = "\u001b[31m[ fail ]\u001b[0m " + status_failed = "\u001b[31m[failed]\u001b[0m " status_warn = "\u001b[33m[ warn ]\u001b[0m " status_skipped = "\u001b[90m[ skip ]\u001b[0m " - same_line = "\u001b[F" - ErrorStatus = "error" - WarningStatus = "warn" - OkStatus = "ok" - SkippedStatus = "skipped" + same_line = "\x0d" + ErrorStatus = 2 + WarningStatus = 1 + OkStatus = 0 + SkippedStatus = -1 ) var errUnimplemented = errors.New("unimplemented") +type status int + +func (s status) String() string { + switch s { + case OkStatus: + return "ok" + case WarningStatus: + return "warn" + case ErrorStatus: + return "fail" + } + return "invalid" +} + +func (s status) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprint("\"", s.String(), "\"")), nil +} + type Result struct { - Time time.Time - Name string - Warnings []string - Status string - Message string - Children []*Result + Time time.Time `json:"time"` + Name string `json:"name"` + Status status `json:"status"` + Warnings []string `json:"warnings,omitempty"` + Message string `json:"message,omitempty"` + Children []*Result `json:"children,omitempty"` } -func (r *Result) sortChildren() { +func (r *Result) finalize() status { + maxStatus := r.Status if len(r.Children) > 0 { sort.SliceStable(r.Children, func(i, j int) bool { return r.Children[i].Time.Before(r.Children[j].Time) }) for _, c := range r.Children { - c.sortChildren() + cms := c.finalize() + if cms > maxStatus { + maxStatus = cms + } + } + if maxStatus > r.Status { + r.Status = maxStatus } } + return maxStatus } func (r *Result) ZeroTimes() { @@ -60,6 +87,7 @@ func (r *Result) 
ZeroTimes() { // TelemetryCollector is an otel SpanProcessor that gathers spans and once the outermost // span ends, walks the otel traces in order to produce a top-down tree of Diagnose results. type TelemetryCollector struct { + ui io.Writer spans map[trace.SpanID]sdktrace.ReadOnlySpan rootSpan sdktrace.ReadOnlySpan results map[trace.SpanID]*Result @@ -67,8 +95,12 @@ type TelemetryCollector struct { mu sync.Mutex } -func NewTelemetryCollector() *TelemetryCollector { +// NewTelemetryCollector creates a SpanProcessor that collects OpenTelemetry spans +// and aggregates them into a tree structure for use by Diagnose. +// It also outputs the status of main sections to that writer. +func NewTelemetryCollector(w io.Writer) *TelemetryCollector { return &TelemetryCollector{ + ui: w, spans: make(map[trace.SpanID]sdktrace.ReadOnlySpan), results: make(map[trace.SpanID]*Result), } @@ -79,6 +111,18 @@ func (t *TelemetryCollector) OnStart(_ context.Context, s sdktrace.ReadWriteSpan t.mu.Lock() defer t.mu.Unlock() t.spans[s.SpanContext().SpanID()] = s + if isMainSection(s) { + fmt.Fprintf(t.ui, status_unknown+s.Name()) + } +} + +func isMainSection(s sdktrace.ReadOnlySpan) bool { + for _, a := range s.Attributes() { + if a.Key == "diagnose" && a.Value.AsString() == "main-section" { + return true + } + } + return false } func (t *TelemetryCollector) OnEnd(e sdktrace.ReadOnlySpan) { @@ -101,7 +145,13 @@ func (t *TelemetryCollector) OnEnd(e sdktrace.ReadOnlySpan) { } // Then walk the results sorting children by time - t.RootResult.sortChildren() + t.RootResult.finalize() + } else if isMainSection(e) { + r := t.getOrBuildResult(e.SpanContext().SpanID()) + if r != nil { + fmt.Print(same_line) + fmt.Fprintln(t.ui, r.String()) + } } } @@ -137,7 +187,7 @@ func (t *TelemetryCollector) getOrBuildResult(id trace.SpanID) *Result { } case skippedEventName: r.Status = SkippedStatus - case ErrorStatus: + case "fail": var message string var action string for _, a := range e.Attributes { @@ -246,10 +296,19 @@ func (r *Result) Write(writer io.Writer) error { } func (r *Result) write(sb *strings.Builder, depth int) { - if r.Status != WarningStatus || (len(r.Warnings) == 0 && r.Message != "") { - for i := 0; i < depth; i++ { - sb.WriteRune('\t') - } + for i := 0; i < depth; i++ { + sb.WriteString(" ") + } + sb.WriteString(r.String()) + sb.WriteRune('\n') + for _, c := range r.Children { + c.write(sb, depth+1) + } +} + +func (r *Result) String() string { + var sb strings.Builder + if len(r.Warnings) == 0 { switch r.Status { case OkStatus: sb.WriteString(status_ok) @@ -267,18 +326,23 @@ func (r *Result) write(sb *strings.Builder, depth int) { } sb.WriteString(r.Message) } - for _, w := range r.Warnings { + warnings := r.Warnings + if r.Message == "" && len(warnings) > 0 { + sb.WriteString(status_warn) + sb.WriteString(r.Name) + sb.WriteString(": ") + sb.WriteString(warnings[0]) + + warnings = warnings[1:] + } + for _, w := range warnings { sb.WriteRune('\n') - for i := 0; i < depth; i++ { - sb.WriteRune('\t') - } + //TODO: Indentation sb.WriteString(status_warn) sb.WriteString(r.Name) sb.WriteString(": ") sb.WriteString(w) } - sb.WriteRune('\n') - for _, c := range r.Children { - c.write(sb, depth+1) - } + return sb.String() + } From e896d62f4ca3e907996c87665fa2867af9c08463 Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Fri, 21 May 2021 19:21:23 -0700 Subject: [PATCH 047/101] Add a Success helper to set successful spans' messages (#11621) --- vault/diagnose/helpers.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/vault/diagnose/helpers.go b/vault/diagnose/helpers.go index a878d00e29c56..4779cfd0dd160 100644 --- a/vault/diagnose/helpers.go +++ b/vault/diagnose/helpers.go @@ -106,6 +106,12 @@ func StartSpan(ctx context.Context, spanName string, options ...trace.SpanOption } } +// Success sets the span to Successful (overriding any previous status) and sets the message to the input. +func Success(ctx context.Context, message string) { + span := trace.SpanFromContext(ctx) + span.SetStatus(codes.Ok, message) +} + // Fail records a failure in the current span func Fail(ctx context.Context, message string) { span := trace.SpanFromContext(ctx) From ce97bfc1f52e5bc48dfdf7c897e8bd54cd1e8892 Mon Sep 17 00:00:00 2001 From: Patrick Picard Date: Mon, 24 May 2021 11:43:53 -0400 Subject: [PATCH 048/101] Update index.mdx (#11644) --- website/content/docs/platform/k8s/csi/index.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/platform/k8s/csi/index.mdx b/website/content/docs/platform/k8s/csi/index.mdx index 1e00c7127c34f..6628c4cfca773 100644 --- a/website/content/docs/platform/k8s/csi/index.mdx +++ b/website/content/docs/platform/k8s/csi/index.mdx @@ -29,8 +29,8 @@ This means that pods will be blocked from starting until the secrets have been r The following features are supported by the Vault CSI Provider: - All Vault secret engines supported. -- Authenticatation using the requesting pod's service account. -- TLS/mTLS communciations with Vault. +- Authentication using the requesting pod's service account. +- TLS/mTLS communications with Vault. - Rendering Vault secrets to files. - Syncing secrets to Kubernetes secrets to be used as environment variables. - Installation via [Vault Helm](/docs/platform/k8s/helm) From f552fc4bfa025e8bc64b3d55cacdcaae43f75f90 Mon Sep 17 00:00:00 2001 From: Anthony Burke Date: Tue, 25 May 2021 02:19:14 +1000 Subject: [PATCH 049/101] Changes role/roles (#11655) --- website/content/docs/secrets/terraform.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/secrets/terraform.mdx b/website/content/docs/secrets/terraform.mdx index e392bdb9e6e1c..c2bfddc746adb 100644 --- a/website/content/docs/secrets/terraform.mdx +++ b/website/content/docs/secrets/terraform.mdx @@ -59,8 +59,8 @@ management tool. desired User ID. ```shell-session - $ vault write terraform/roles/my-role user_id=user-12345abcde - Success! Data written to: terraform/roles/my-role + $ vault write terraform/role/my-role user_id=user-12345abcde + Success! 
Data written to: terraform/role/my-role ``` ## Usage From 331f59405fb8912c3b112e052e57aac0e9d8032d Mon Sep 17 00:00:00 2001 From: Angel Garbarino Date: Mon, 24 May 2021 10:45:35 -0600 Subject: [PATCH 050/101] UI/tools partial (#11672) * hash tools from partial to component * initial setup of tools random, but issue remaining with bytes * rewrap * unwrap * final two partials * fix issues with actions on tool wrap * fix hash * changelog * address pr comments * fix onClear * trigger run * triggering test suite --- changelog/11672.txt | 4 ++ ui/app/components/tool-actions-form.js | 8 +-- ui/app/components/tool-hash.js | 28 +++++++++ ui/app/components/tool-lookup.js | 34 ++++++++++ ui/app/components/tool-random.js | 29 +++++++++ ui/app/components/tool-rewrap.js | 31 ++++++++++ ui/app/components/tool-unwrap.js | 31 ++++++++++ ui/app/components/tool-wrap.js | 51 +++++++++++++++ .../components/tool-actions-form.hbs | 62 ++++++++++++++++++- .../hash.hbs => components/tool-hash.hbs} | 22 +++---- ui/app/templates/components/tool-lookup.hbs | 44 +++++++++++++ .../random.hbs => components/tool-random.hbs} | 16 ++--- .../rewrap.hbs => components/tool-rewrap.hbs} | 14 ++--- .../unwrap.hbs => components/tool-unwrap.hbs} | 26 ++++---- .../wrap.hbs => components/tool-wrap.hbs} | 20 +++--- ui/app/templates/partials/tools/lookup.hbs | 44 ------------- 16 files changed, 364 insertions(+), 100 deletions(-) create mode 100644 changelog/11672.txt create mode 100644 ui/app/components/tool-hash.js create mode 100644 ui/app/components/tool-lookup.js create mode 100644 ui/app/components/tool-random.js create mode 100644 ui/app/components/tool-rewrap.js create mode 100644 ui/app/components/tool-unwrap.js create mode 100644 ui/app/components/tool-wrap.js rename ui/app/templates/{partials/tools/hash.hbs => components/tool-hash.hbs} (71%) create mode 100644 ui/app/templates/components/tool-lookup.hbs rename ui/app/templates/{partials/tools/random.hbs => components/tool-random.hbs} (73%) rename ui/app/templates/{partials/tools/rewrap.hbs => components/tool-rewrap.hbs} (61%) rename ui/app/templates/{partials/tools/unwrap.hbs => components/tool-unwrap.hbs} (61%) rename ui/app/templates/{partials/tools/wrap.hbs => components/tool-wrap.hbs} (64%) delete mode 100644 ui/app/templates/partials/tools/lookup.hbs diff --git a/changelog/11672.txt b/changelog/11672.txt new file mode 100644 index 0000000000000..2d019cec1018d --- /dev/null +++ b/changelog/11672.txt @@ -0,0 +1,4 @@ +```release-note:improvement +ui: Replace tool partials with components. +``` + diff --git a/ui/app/components/tool-actions-form.js b/ui/app/components/tool-actions-form.js index fd9f3df9a0235..f57fc1bf46426 100644 --- a/ui/app/components/tool-actions-form.js +++ b/ui/app/components/tool-actions-form.js @@ -106,7 +106,6 @@ export default Component.extend(DEFAULTS, { if (action === 'random') { return { bytes: this.bytes, format: this.format }; } - if (action === 'hash') { return { input: this.input, format: this.format, algorithm: this.algorithm }; } @@ -135,14 +134,11 @@ export default Component.extend(DEFAULTS, { this.reset(); }, - updateTtl(evt) { - const ttl = evt.enabled ? 
`${evt.seconds}s` : '30m'; + updateTtl(ttl) { set(this, 'wrapTTL', ttl); }, - codemirrorUpdated(val, codemirror) { - codemirror.performLint(); - const hasErrors = codemirror.state.lint.marked.length > 0; + codemirrorUpdated(val, hasErrors) { setProperties(this, { buttonDisabled: hasErrors, data: val, diff --git a/ui/app/components/tool-hash.js b/ui/app/components/tool-hash.js new file mode 100644 index 0000000000000..7d1f9e316b009 --- /dev/null +++ b/ui/app/components/tool-hash.js @@ -0,0 +1,28 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module ToolHash + * ToolHash components are components that sys/wrapping/hash functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param onClear {Function} - parent action that is passed through. Must be passed as {{action "onClear"}} + * @param sum=null {String} - property passed from parent to child and then passed back up to parent. + * @param algorithm {String} - property returned from parent. + * @param format {String} - property returned from parent. + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ +export default class ToolHash extends Component { + @action + onClear() { + this.args.onClear(); + } +} diff --git a/ui/app/components/tool-lookup.js b/ui/app/components/tool-lookup.js new file mode 100644 index 0000000000000..a9220f86de921 --- /dev/null +++ b/ui/app/components/tool-lookup.js @@ -0,0 +1,34 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module ToolLookup + * ToolLookup components are components that sys/wrapping/lookup functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param creation_time {Function} - parent action that is passed through. + * @param creation_ttl {Function} - parent action that is passed through. + * @param creation_path {Function} - parent action that is passed through. + * @param expirationDate='' {String} - value returned from lookup. + * @param selectedAction="wrap" - passed in from parent. This is the wrap action, others include hash, etc. + * @param token=null {String} - property passed from parent to child and then passed back up to parent + * @param onClear {Function} - parent action that is passed through. Must be passed as {{action "onClear"}} + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ +export default class ToolLookup extends Component { + @action + onClear() { + this.args.onClear(); + } +} diff --git a/ui/app/components/tool-random.js b/ui/app/components/tool-random.js new file mode 100644 index 0000000000000..c3e876e8152b0 --- /dev/null +++ b/ui/app/components/tool-random.js @@ -0,0 +1,29 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module ToolRandom + * ToolRandom components are components that sys/wrapping/random functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param onClear {Function} - parent action that is passed through. Must be passed as {{action "onClear"}} + * @param format {String} - property returned from parent. 
+ * @param bytes {String} - property returned from parent. + * @param random_bytes {String} - property returned from parent. + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ + +export default class ToolRandom extends Component { + @action + onClear() { + this.args.onClear(); + } +} diff --git a/ui/app/components/tool-rewrap.js b/ui/app/components/tool-rewrap.js new file mode 100644 index 0000000000000..c711b2ac19827 --- /dev/null +++ b/ui/app/components/tool-rewrap.js @@ -0,0 +1,31 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module ToolRewrap + * ToolRewrap components are components that sys/wrapping/rewrap functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param onClear {Function} - parent action that is passed through. Must be passed as {{action "onClear"}} + * @param token=null {String} - property passed from parent to child and then passed back up to parent + * @param rewrap_token {String} - property returned from parent. + * @param selectedAction {String} - property returned from parent. + * @param bytes {String} - property returned from parent. + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ + +export default class ToolRewrap extends Component { + @action + onClear() { + this.args.onClear(); + } +} diff --git a/ui/app/components/tool-unwrap.js b/ui/app/components/tool-unwrap.js new file mode 100644 index 0000000000000..acfafc94ccd08 --- /dev/null +++ b/ui/app/components/tool-unwrap.js @@ -0,0 +1,31 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module ToolUnwrap + * ToolUnwrap components are components that sys/wrapping/unwrap functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param onClear {Function} - parent action that is passed through. Must be passed as {{action "onClear"}} + * @param token=null {String} - property passed from parent to child and then passed back up to parent + * @param unwrap_data {String} - property returned from parent. + * @param unwrapActiveTab {String} - property returned from parent. + * @param details {String} - property returned from parent. + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ + +export default class ToolUnwrap extends Component { + @action + onClear() { + this.args.onClear(); + } +} diff --git a/ui/app/components/tool-wrap.js b/ui/app/components/tool-wrap.js new file mode 100644 index 0000000000000..360cfcaff7ac7 --- /dev/null +++ b/ui/app/components/tool-wrap.js @@ -0,0 +1,51 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { tracked } from '@glimmer/tracking'; + +/** + * @module ToolWrap + * ToolWrap components are components that sys/wrapping/wrap functionality. Most of the functionality is passed through as actions from the tool-actions-form and then called back with properties. + * + * @example + * ```js + * + * ``` + * @param onClear {Function} - parent action that is passed through. 
Must be passed as {{action "onClear"}} + * @param token=null {String} - property passed from parent to child and then passed back up to parent + * @param selectedAction="wrap" - passed in from parent. This is the wrap action, others include hash, etc. + * @param codemirrorUpdated {Function} - parent action that is passed through. Must be passed as {{action "codemirrorUpdated"}}. + * @param updateTtl {Function} - parent action that is passed through. Must be passed as {{action "updateTtl"}} + * @param buttonDisabled=false {Boolean} - false default and if there is an error on codemirror it turns to true. + * @param error=null {Object} - errors passed from parent as default then from child back to parent. + */ + +export default class ToolWrap extends Component { + @tracked data = '{\n}'; + @tracked buttonDisabled = false; + + @action + onClear() { + this.args.onClear(); + } + @action + updateTtl(evt) { + if (!evt) return; + const ttl = evt.enabled ? `${evt.seconds}s` : '30m'; + this.args.updateTtl(ttl); + } + @action + codemirrorUpdated(val, codemirror) { + codemirror.performLint(); + const hasErrors = codemirror?.state.lint.marked?.length > 0; + this.data = val; + this.buttonDisabled = hasErrors; + this.args.codemirrorUpdated(val, hasErrors); + } +} diff --git a/ui/app/templates/components/tool-actions-form.hbs b/ui/app/templates/components/tool-actions-form.hbs index 7a29fd8bd1fc9..e45e4f4457051 100644 --- a/ui/app/templates/components/tool-actions-form.hbs +++ b/ui/app/templates/components/tool-actions-form.hbs @@ -1,3 +1,63 @@
    - {{partial (concat "partials/tools/" selectedAction)}} + {{#if (eq selectedAction 'hash')}} + + {{else if (eq selectedAction 'random')}} + + {{else if (eq selectedAction 'rewrap')}} + + {{else if (eq selectedAction 'unwrap')}} + + {{else if (eq selectedAction 'lookup')}} + + {{else if (eq selectedAction 'wrap')}} + + {{else}} + + {{/if}} diff --git a/ui/app/templates/partials/tools/hash.hbs b/ui/app/templates/components/tool-hash.hbs similarity index 71% rename from ui/app/templates/partials/tools/hash.hbs rename to ui/app/templates/components/tool-hash.hbs index c051fa5d8820f..b9ca925de3bb5 100644 --- a/ui/app/templates/partials/tools/hash.hbs +++ b/ui/app/templates/components/tool-hash.hbs @@ -6,37 +6,37 @@ -{{#if sum}} +{{#if @sum}}
    - +
    - + Copy
    -
    {{else}}
    - +
    - +
    - + Copy
    -
    {{else}}
    - +
    @@ -33,7 +33,7 @@ Number of bytes
    - +
    @@ -45,10 +45,10 @@