diff --git a/.github/actions/install-external-tools/action.yml b/.github/actions/install-external-tools/action.yml
index 1b9b2babb5a8..152b6822ba09 100644
--- a/.github/actions/install-external-tools/action.yml
+++ b/.github/actions/install-external-tools/action.yml
@@ -22,7 +22,7 @@ runs:
# up here.
- run: go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
shell: bash
- - run: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+ - run: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
shell: bash
- run: go install github.com/favadi/protoc-go-inject-tag@latest
shell: bash
diff --git a/Dockerfile b/Dockerfile
index 3799f778dd2c..62860b7efa6c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -24,7 +24,8 @@ LABEL name="Vault" \
summary="Vault is a tool for securely accessing secrets." \
description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as per Legal requirement
+COPY LICENSE /licenses/LICENSE.txt
# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
ENV NAME=$NAME
@@ -95,7 +96,8 @@ LABEL name="Vault" \
summary="Vault is a tool for securely accessing secrets." \
description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as per Legal requirement
+COPY LICENSE /licenses/LICENSE.txt
# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
ENV NAME=$NAME
diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go
index 0c6f8c3398ec..eb3ef6e5339e 100644
--- a/builtin/credential/aws/path_config_identity.go
+++ b/builtin/credential/aws/path_config_identity.go
@@ -66,7 +66,7 @@ func (b *backend) pathConfigIdentity() *framework.Path {
"iam_alias": {
Type: framework.TypeString,
Default: identityAliasIAMUniqueID,
- Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasRoleID),
+ Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, %q and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn, identityAliasRoleID),
},
iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields),
"ec2_alias": {
@@ -150,7 +150,7 @@ func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *f
iamAliasRaw, ok := data.GetOk("iam_alias")
if ok {
iamAlias := iamAliasRaw.(string)
- allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn}
+ allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn}
if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) {
return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil
}
@@ -194,11 +194,12 @@ type identityConfig struct {
}
const (
- identityAliasIAMUniqueID = "unique_id"
- identityAliasIAMFullArn = "full_arn"
- identityAliasEC2InstanceID = "instance_id"
- identityAliasEC2ImageID = "image_id"
- identityAliasRoleID = "role_id"
+ identityAliasIAMUniqueID = "unique_id"
+ identityAliasIAMFullArn = "full_arn"
+ identityAliasIAMCanonicalArn = "canonical_arn"
+ identityAliasEC2InstanceID = "instance_id"
+ identityAliasEC2ImageID = "image_id"
+ identityAliasRoleID = "role_id"
)
const pathConfigIdentityHelpSyn = `
diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go
index b66146d1ee67..e3d31229fc74 100644
--- a/builtin/credential/aws/path_login.go
+++ b/builtin/credential/aws/path_login.go
@@ -1397,6 +1397,8 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
identityAlias = callerUniqueId
case identityAliasIAMFullArn:
identityAlias = callerID.Arn
+ case identityAliasIAMCanonicalArn:
+ identityAlias = entity.canonicalArn()
}
// If we're just looking up for MFA, return the Alias info
diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go
index 2006684889ff..4ad1887853f7 100644
--- a/builtin/logical/pki/ca_util.go
+++ b/builtin/logical/pki/ca_util.go
@@ -237,7 +237,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr
keyBits = certutil.GetPublicKeySize(pubKey)
case *ecdsa.PublicKey:
keyType = certutil.ECPrivateKey
- case *ed25519.PublicKey:
+ case ed25519.PublicKey:
keyType = certutil.Ed25519PrivateKey
default:
return certutil.UnknownPrivateKey, 0, fmt.Errorf("unsupported public key: %#v", pubKey)
diff --git a/builtin/logical/pki/ca_util_test.go b/builtin/logical/pki/ca_util_test.go
new file mode 100644
index 000000000000..d4ef64e68fe1
--- /dev/null
+++ b/builtin/logical/pki/ca_util_test.go
@@ -0,0 +1,82 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+
+ "github.com/hashicorp/vault/sdk/helper/certutil"
+)
+
+func TestGetKeyTypeAndBitsFromPublicKeyForRole(t *testing.T) {
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatalf("error generating rsa key: %s", err)
+ }
+
+ ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ecdsa key: %s", err)
+ }
+
+ publicKey, _, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ed25519 key: %s", err)
+ }
+
+ testCases := map[string]struct {
+ publicKey crypto.PublicKey
+ expectedKeyType certutil.PrivateKeyType
+ expectedKeyBits int
+ expectError bool
+ }{
+ "rsa": {
+ publicKey: rsaKey.Public(),
+ expectedKeyType: certutil.RSAPrivateKey,
+ expectedKeyBits: 2048,
+ },
+ "ecdsa": {
+ publicKey: ecdsaKey.Public(),
+ expectedKeyType: certutil.ECPrivateKey,
+ expectedKeyBits: 0,
+ },
+ "ed25519": {
+ publicKey: publicKey,
+ expectedKeyType: certutil.Ed25519PrivateKey,
+ expectedKeyBits: 0,
+ },
+ "bad key type": {
+ publicKey: []byte{},
+ expectedKeyType: certutil.UnknownPrivateKey,
+ expectedKeyBits: 0,
+ expectError: true,
+ },
+ }
+
+ for name, tt := range testCases {
+ t.Run(name, func(t *testing.T) {
+ keyType, keyBits, err := getKeyTypeAndBitsFromPublicKeyForRole(tt.publicKey)
+ if err != nil && !tt.expectError {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if err == nil && tt.expectError {
+ t.Fatal("expected error, got nil")
+ }
+
+ if keyType != tt.expectedKeyType {
+ t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType)
+ }
+
+ if keyBits != tt.expectedKeyBits {
+ t.Fatalf("key bits mismatch: expected %d, got %d", tt.expectedKeyBits, keyBits)
+ }
+ })
+ }
+}
diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go
index 0465b8dfa2be..f71c9516ea5f 100644
--- a/builtin/logical/transit/path_hmac.go
+++ b/builtin/logical/transit/path_hmac.go
@@ -257,7 +257,19 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f
name := d.Get("name").(string)
algorithm := d.Get("urlalgorithm").(string)
if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
+ hashAlgorithmRaw, hasHashAlgorithm := d.GetOk("hash_algorithm")
+ algorithmRaw, hasAlgorithm := d.GetOk("algorithm")
+
+ // As `algorithm` is deprecated, make sure we only read it if
+ // `hash_algorithm` is not present.
+ switch {
+ case hasHashAlgorithm:
+ algorithm = hashAlgorithmRaw.(string)
+ case hasAlgorithm:
+ algorithm = algorithmRaw.(string)
+ default:
+ algorithm = d.Get("hash_algorithm").(string)
+ }
}
// Get the policy
diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go
index 4fa0fbce318c..3f21106c4cc9 100644
--- a/builtin/logical/transit/path_hmac_test.go
+++ b/builtin/logical/transit/path_hmac_test.go
@@ -94,17 +94,40 @@ func TestTransit_HMAC(t *testing.T) {
}
// Now verify
+ verify := func() {
+ t.Helper()
+
+ resp, err = b.HandleRequest(context.Background(), req)
+ if err != nil {
+ t.Fatalf("%v: %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errStr, ok := resp.Data["error"]; ok {
+ t.Fatalf("error validating hmac: %s", errStr)
+ }
+ if resp.Data["valid"].(bool) == false {
+ t.Fatalf(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
+ }
+ }
req.Path = strings.ReplaceAll(req.Path, "hmac", "verify")
req.Data["hmac"] = value.(string)
- resp, err = b.HandleRequest(context.Background(), req)
- if err != nil {
- t.Fatalf("%v: %v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.Data["valid"].(bool) == false {
- panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
+ verify()
+
+ // If `algorithm` parameter is used, try with `hash_algorithm` as well
+ if algorithm, ok := req.Data["algorithm"]; ok {
+ // Note that `hash_algorithm` takes precedence over `algorithm`, since the
+ // latter is deprecated.
+ req.Data["hash_algorithm"] = algorithm
+ req.Data["algorithm"] = "xxx"
+ defer func() {
+ // Restore the req fields, since it is re-used by the tests below
+ delete(req.Data, "hash_algorithm")
+ req.Data["algorithm"] = algorithm
+ }()
+
+ verify()
}
}
diff --git a/changelog/26993.txt b/changelog/26993.txt
new file mode 100644
index 000000000000..35acaa79a8ad
--- /dev/null
+++ b/changelog/26993.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Update PGP display and show error for Generate Operation Token flow with PGP
+```
\ No newline at end of file
diff --git a/changelog/27093.txt b/changelog/27093.txt
new file mode 100644
index 000000000000..a24becec3eac
--- /dev/null
+++ b/changelog/27093.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+pki: Fix error in cross-signing using ed25519 keys
+```
diff --git a/changelog/27178.txt b/changelog/27178.txt
new file mode 100644
index 000000000000..c84c67f34e27
--- /dev/null
+++ b/changelog/27178.txt
@@ -0,0 +1,3 @@
+```release-note:change
+ui/kubernetes: Update the roles filter-input to use explicit search.
+```
diff --git a/changelog/27184.txt b/changelog/27184.txt
new file mode 100644
index 000000000000..500045efb5af
--- /dev/null
+++ b/changelog/27184.txt
@@ -0,0 +1,3 @@
+```release-note:change
+core/identity: improve performance for secondary nodes receiving identity related updates through replication
+```
diff --git a/changelog/27211.txt b/changelog/27211.txt
new file mode 100644
index 000000000000..26bf725ebff3
--- /dev/null
+++ b/changelog/27211.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter.
+```
diff --git a/command/debug.go b/command/debug.go
index 09df88fb4d60..e81bc30d1edc 100644
--- a/command/debug.go
+++ b/command/debug.go
@@ -4,10 +4,12 @@
package command
import (
+ "archive/tar"
+ "compress/gzip"
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/url"
"os"
"path/filepath"
@@ -26,7 +28,6 @@ import (
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/version"
- "github.com/mholt/archiver/v3"
"github.com/oklog/run"
"github.com/posener/complete"
)
@@ -374,7 +375,7 @@ func (c *DebugCommand) generateIndex() error {
}
// Write out file
- if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil {
+ if err := os.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil {
return fmt.Errorf("error generating index file; %s", err)
}
@@ -777,7 +778,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600)
if err != nil {
c.captureError("pprof."+target, err)
}
@@ -795,13 +796,13 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600)
if err != nil {
c.captureError("pprof.goroutines-text", err)
}
}()
- // If the our remaining duration is less than the interval value
+ // If our remaining duration is less than the interval value
// skip profile and trace.
runDuration := currentTimestamp.Sub(startTime)
if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval {
@@ -819,7 +820,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600)
if err != nil {
c.captureError("pprof.profile", err)
}
@@ -835,7 +836,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600)
if err != nil {
c.captureError("pprof.trace", err)
}
@@ -971,7 +972,7 @@ func (c *DebugCommand) persistCollection(collection []map[string]interface{}, ou
if err != nil {
return err
}
- if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil {
+ if err := os.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil {
return err
}
@@ -983,14 +984,100 @@ func (c *DebugCommand) compress(dst string) error {
defer osutil.Umask(osutil.Umask(0o077))
}
- tgz := archiver.NewTarGz()
- if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil {
- return fmt.Errorf("failed to compress data: %s", err)
+ if err := archiveToTgz(c.flagOutput, dst); err != nil {
+ return fmt.Errorf("failed to compress data: %w", err)
}
// If everything is fine up to this point, remove original directory
if err := os.RemoveAll(c.flagOutput); err != nil {
- return fmt.Errorf("failed to remove data directory: %s", err)
+ return fmt.Errorf("failed to remove data directory: %w", err)
+ }
+
+ return nil
+}
+
+// archiveToTgz compresses all the files in sourceDir into
+// a tarball at destination.
+func archiveToTgz(sourceDir, destination string) error {
+ file, err := os.Create(destination)
+ if err != nil {
+ return fmt.Errorf("failed to create file: %w", err)
+ }
+ defer file.Close()
+
+ gzipWriter := gzip.NewWriter(file)
+ defer gzipWriter.Close()
+
+ tarWriter := tar.NewWriter(gzipWriter)
+ defer tarWriter.Close()
+
+ err = filepath.Walk(sourceDir,
+ func(filePath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ return addFileToTar(sourceDir, filePath, tarWriter)
+ })
+
+ return err
+}
+
+// addFileToTar takes a file at filePath and adds it to the tar
+// being written to by tarWriter, alongside its header.
+// The tar header name will be relative. Example: If we're tarring
+// a file in ~/a/b/c/foo/bar.json, the header name will be foo/bar.json
+func addFileToTar(sourceDir, filePath string, tarWriter *tar.Writer) error {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return fmt.Errorf("failed to open file %q: %w", filePath, err)
+ }
+ defer file.Close()
+
+ stat, err := file.Stat()
+ if err != nil {
+ return fmt.Errorf("failed to stat file %q: %w", filePath, err)
+ }
+
+ var link string
+ mode := stat.Mode()
+ if mode&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(filePath); err != nil {
+ return fmt.Errorf("failed to read symlink for file %q: %w", filePath, err)
+ }
+ }
+ tarHeader, err := tar.FileInfoHeader(stat, link)
+ if err != nil {
+ return fmt.Errorf("failed to create tar header for file %q: %w", filePath, err)
+ }
+
+ // The tar header name should be relative, so remove the sourceDir from it,
+ // but preserve the last directory name.
+ // Example: If we're tarring a file in ~/a/b/c/foo/bar.json
+ // The name should be foo/bar.json
+ sourceDirExceptLastDir := filepath.Dir(sourceDir)
+ headerName := strings.TrimPrefix(filepath.Clean(filePath), filepath.Clean(sourceDirExceptLastDir)+"/")
+
+ // Directories should end with a slash.
+ if stat.IsDir() && !strings.HasSuffix(headerName, "/") {
+ headerName += "/"
+ }
+ tarHeader.Name = headerName
+
+ err = tarWriter.WriteHeader(tarHeader)
+ if err != nil {
+ return fmt.Errorf("failed to write tar header for file %q: %w", filePath, err)
+ }
+
+ // If it's not a regular file (e.g. link or directory) we shouldn't
+ // copy the file. The body of a tar entry (i.e. what's done by the
+ // below io.Copy call) is only required for tar files of TypeReg.
+ if tarHeader.Typeflag != tar.TypeReg {
+ return nil
+ }
+
+ _, err = io.Copy(tarWriter, file)
+ if err != nil {
+ return fmt.Errorf("failed to copy file %q into tarball: %w", filePath, err)
}
return nil
@@ -1007,7 +1094,7 @@ func pprofTarget(ctx context.Context, client *api.Client, target string, params
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@@ -1027,7 +1114,7 @@ func pprofProfile(ctx context.Context, client *api.Client, duration time.Duratio
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@@ -1047,7 +1134,7 @@ func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration)
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
diff --git a/command/debug_test.go b/command/debug_test.go
index 279c48f0a5ac..16d297bf920f 100644
--- a/command/debug_test.go
+++ b/command/debug_test.go
@@ -5,9 +5,10 @@ package command
import (
"archive/tar"
+ "compress/gzip"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"os"
"path/filepath"
"runtime"
@@ -18,7 +19,7 @@ import (
"github.com/hashicorp/cli"
"github.com/hashicorp/vault/api"
- "github.com/mholt/archiver/v3"
+ "github.com/stretchr/testify/require"
)
func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) {
@@ -35,11 +36,7 @@ func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) {
func TestDebugCommand_Run(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
cases := []struct {
name string
@@ -104,6 +101,54 @@ func TestDebugCommand_Run(t *testing.T) {
}
}
+// expectHeaderNamesInTarGzFile asserts that the expectedHeaderNames
+// exactly match the header names in the tar.gz file at tarballPath.
+// Will error if there are more or fewer than expected.
+// ignoreUnexpectedHeaders toggles ignoring the presence of headers not
+// in expectedHeaderNames.
+func expectHeaderNamesInTarGzFile(t *testing.T, tarballPath string, expectedHeaderNames []string, ignoreUnexpectedHeaders bool) {
+ t.Helper()
+
+ file, err := os.Open(tarballPath)
+ require.NoError(t, err)
+
+ uncompressedStream, err := gzip.NewReader(file)
+ require.NoError(t, err)
+
+ tarReader := tar.NewReader(uncompressedStream)
+ headersFoundMap := make(map[string]any)
+
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ // We're at the end of the tar.
+ break
+ }
+ require.NoError(t, err)
+
+ // Ignore directories.
+ if header.Typeflag == tar.TypeDir {
+ continue
+ }
+
+ for _, name := range expectedHeaderNames {
+ if header.Name == name {
+ headersFoundMap[header.Name] = struct{}{}
+ }
+ }
+ if _, ok := headersFoundMap[header.Name]; !ok && !ignoreUnexpectedHeaders {
+ t.Fatalf("unexpected file: %s", header.Name)
+ }
+ }
+
+ // Expect that every expectedHeader was found at some point
+ for _, name := range expectedHeaderNames {
+ if _, ok := headersFoundMap[name]; !ok {
+ t.Fatalf("missing header from tar: %s", name)
+ }
+ }
+}
+
func TestDebugCommand_Archive(t *testing.T) {
t.Parallel()
@@ -137,11 +182,7 @@ func TestDebugCommand_Archive(t *testing.T) {
// Create temp dirs for each test case since os.Stat and tgz.Walk
// (called down below) exhibits raciness otherwise.
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -177,32 +218,14 @@ func TestDebugCommand_Archive(t *testing.T) {
}
bundlePath := filepath.Join(testDir, basePath+expectedExt)
- _, err = os.Stat(bundlePath)
+ _, err := os.Stat(bundlePath)
if os.IsNotExist(err) {
t.Log(ui.OutputWriter.String())
t.Fatal(err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- return fmt.Errorf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
- return nil
- }
-
- if fh.Name != filepath.Join(basePath, "server_status.json") {
- return fmt.Errorf("unexpected file: %s", fh.Name)
- }
- return nil
- })
- if err != nil {
- t.Fatal(err)
- }
+ expectedHeaders := []string{filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json")}
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false)
})
}
}
@@ -258,11 +281,7 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -287,45 +306,22 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
}
bundlePath := filepath.Join(testDir, basePath+debugCompressionExt)
- _, err = os.Open(bundlePath)
+ _, err := os.Open(bundlePath)
if err != nil {
t.Fatalf("failed to open archive: %s", err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- t.Fatalf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
- return nil
- }
-
- for _, fileName := range tc.expectedFiles {
- if fh.Name == filepath.Join(basePath, fileName) {
- return nil
- }
- }
-
- // If we reach here, it means that this is an unexpected file
- return fmt.Errorf("unexpected file: %s", fh.Name)
- })
- if err != nil {
- t.Fatal(err)
+ expectedHeaders := []string{filepath.Join(basePath, "index.json")}
+ for _, fileName := range tc.expectedFiles {
+ expectedHeaders = append(expectedHeaders, filepath.Join(basePath, fileName))
}
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false)
})
}
}
func TestDebugCommand_Pprof(t *testing.T) {
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -379,11 +375,7 @@ func TestDebugCommand_Pprof(t *testing.T) {
func TestDebugCommand_IndexFile(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -409,7 +401,7 @@ func TestDebugCommand_IndexFile(t *testing.T) {
t.Fatalf("expected %d to be %d", code, exp)
}
- content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json"))
+ content, err := os.ReadFile(filepath.Join(outputPath, "index.json"))
if err != nil {
t.Fatal(err)
}
@@ -426,11 +418,7 @@ func TestDebugCommand_IndexFile(t *testing.T) {
func TestDebugCommand_TimingChecks(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
cases := []struct {
name string
@@ -585,11 +573,7 @@ func TestDebugCommand_OutputExists(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -602,12 +586,12 @@ func TestDebugCommand_OutputExists(t *testing.T) {
// Create a conflicting file/directory
if tc.compress {
- _, err = os.Create(outputPath)
+ _, err := os.Create(outputPath)
if err != nil {
t.Fatal(err)
}
} else {
- err = os.Mkdir(outputPath, 0o700)
+ err := os.Mkdir(outputPath, 0o700)
if err != nil {
t.Fatal(err)
}
@@ -639,11 +623,7 @@ func TestDebugCommand_OutputExists(t *testing.T) {
func TestDebugCommand_PartialPermissions(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -680,38 +660,14 @@ func TestDebugCommand_PartialPermissions(t *testing.T) {
t.Fatalf("failed to open archive: %s", err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- t.Fatalf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" {
- return nil
- }
-
- // Ignore directories, which still get created by pprof but should
- // otherwise be empty.
- if fh.FileInfo().IsDir() {
- return nil
- }
-
- switch {
- case fh.Name == filepath.Join(basePath, "index.json"):
- case fh.Name == filepath.Join(basePath, "replication_status.json"):
- case fh.Name == filepath.Join(basePath, "server_status.json"):
- case fh.Name == filepath.Join(basePath, "vault.log"):
- default:
- return fmt.Errorf("unexpected file: %s", fh.Name)
- }
-
- return nil
- })
- if err != nil {
- t.Fatal(err)
+ expectedHeaders := []string{
+ filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json"),
+ filepath.Join(basePath, "vault.log"),
}
+
+ // We set ignoreUnexpectedHeaders to true as replication_status.json is only sometimes
+ // produced. Relying on it being or not being there would be racy.
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, true)
}
// set insecure umask to see if the files and directories get created with right permissions
@@ -748,11 +704,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
// set insecure umask
defer syscall.Umask(syscall.Umask(0))
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -796,20 +748,22 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
// check permissions of the files within the parent directory
switch tc.compress {
case true:
- tgz := archiver.NewTarGz()
+ file, err := os.Open(bundlePath)
+ require.NoError(t, err)
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- return fmt.Errorf("invalid file header: %#v", f.Header)
- }
- err = isValidFilePermissions(fh.FileInfo())
- if err != nil {
- t.Fatalf(err.Error())
- }
- return nil
- })
+ uncompressedStream, err := gzip.NewReader(file)
+ require.NoError(t, err)
+
+ tarReader := tar.NewReader(uncompressedStream)
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ err = isValidFilePermissions(header.FileInfo())
+ require.NoError(t, err)
+ }
case false:
err = filepath.Walk(bundlePath, func(path string, info os.FileInfo, err error) error {
err = isValidFilePermissions(info)
@@ -820,9 +774,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
})
}
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
})
}
}
diff --git a/go.mod b/go.mod
index 8e1de83f6d0a..9b5251cbc131 100644
--- a/go.mod
+++ b/go.mod
@@ -174,7 +174,6 @@ require (
github.com/kr/text v0.2.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
- github.com/mholt/archiver/v3 v3.5.1
github.com/michaelklishin/rabbit-hole/v2 v2.12.0
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a
github.com/mitchellh/copystructure v1.2.0
@@ -276,7 +275,6 @@ require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/agext/levenshtein v1.2.1 // indirect
- github.com/andybalholm/brotli v1.0.5 // indirect
github.com/apache/arrow/go/v14 v14.0.2 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.23.4 // indirect
@@ -337,7 +335,6 @@ require (
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
@@ -427,7 +424,6 @@ require (
github.com/kelseyhightower/envconfig v1.4.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
- github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/linode/linodego v0.7.1 // indirect
@@ -460,7 +456,6 @@ require (
github.com/mtibben/percent v0.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect
- github.com/nwaples/rardecode v1.1.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
@@ -499,7 +494,6 @@ require (
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect
- github.com/ulikunitz/xz v0.5.10 // indirect
github.com/vmware/govmomi v0.18.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
@@ -508,7 +502,6 @@ require (
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
diff --git a/go.sum b/go.sum
index b0cc8032fb32..38754fbd726f 100644
--- a/go.sum
+++ b/go.sum
@@ -1366,10 +1366,7 @@ github.com/aliyun/alibaba-cloud-sdk-go v1.62.676/go.mod h1:CJJYa1ZMxjlN/NbXEwmej
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
-github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -1832,9 +1829,6 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
-github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
-github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -2160,7 +2154,6 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -2748,10 +2741,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -2760,13 +2751,10 @@ github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
-github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
-github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -2862,8 +2850,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mediocregopher/radix/v4 v4.1.4 h1:Uze6DEbEAvL+VHXUEu/EDBTkUk5CLct5h3nVSGpc6Ts=
github.com/mediocregopher/radix/v4 v4.1.4/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE=
-github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
-github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0=
github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
@@ -2990,9 +2976,6 @@ github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9g
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
-github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M=
-github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -3130,7 +3113,6 @@ github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -3405,10 +3387,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -3451,8 +3429,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go
index ca245d7d4208..bfdc153c4852 100644
--- a/sdk/helper/certutil/types.go
+++ b/sdk/helper/certutil/types.go
@@ -171,7 +171,7 @@ func GetPrivateKeyTypeFromPublicKey(pubKey crypto.PublicKey) PrivateKeyType {
return RSAPrivateKey
case *ecdsa.PublicKey:
return ECPrivateKey
- case *ed25519.PublicKey:
+ case ed25519.PublicKey:
return Ed25519PrivateKey
default:
return UnknownPrivateKey
diff --git a/sdk/helper/certutil/types_test.go b/sdk/helper/certutil/types_test.go
new file mode 100644
index 000000000000..2cf383afaa02
--- /dev/null
+++ b/sdk/helper/certutil/types_test.go
@@ -0,0 +1,63 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package certutil
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+)
+
+func TestGetPrivateKeyTypeFromPublicKey(t *testing.T) {
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatalf("error generating rsa key: %s", err)
+ }
+
+ ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ecdsa key: %s", err)
+ }
+
+ publicKey, _, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ed25519 key: %s", err)
+ }
+
+ testCases := map[string]struct {
+ publicKey crypto.PublicKey
+ expectedKeyType PrivateKeyType
+ }{
+ "rsa": {
+ publicKey: rsaKey.Public(),
+ expectedKeyType: RSAPrivateKey,
+ },
+ "ecdsa": {
+ publicKey: ecdsaKey.Public(),
+ expectedKeyType: ECPrivateKey,
+ },
+ "ed25519": {
+ publicKey: publicKey,
+ expectedKeyType: Ed25519PrivateKey,
+ },
+ "bad key type": {
+ publicKey: []byte{},
+ expectedKeyType: UnknownPrivateKey,
+ },
+ }
+
+ for name, tt := range testCases {
+ t.Run(name, func(t *testing.T) {
+ keyType := GetPrivateKeyTypeFromPublicKey(tt.publicKey)
+
+ if keyType != tt.expectedKeyType {
+ t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType)
+ }
+ })
+ }
+}
diff --git a/tools/tools.sh b/tools/tools.sh
index 16a117b0efd7..a3007c8c1d9e 100755
--- a/tools/tools.sh
+++ b/tools/tools.sh
@@ -42,7 +42,7 @@ install_external() {
github.com/golangci/revgrep/cmd/revgrep@latest
golang.org/x/tools/cmd/goimports@latest
google.golang.org/protobuf/cmd/protoc-gen-go@latest
- google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
gotest.tools/gotestsum@latest
honnef.co/go/tools/cmd/staticcheck@latest
mvdan.cc/gofumpt@latest
diff --git a/ui/lib/core/addon/components/choose-pgp-key-form.hbs b/ui/lib/core/addon/components/choose-pgp-key-form.hbs
index de61638f0716..ee66c580681c 100644
--- a/ui/lib/core/addon/components/choose-pgp-key-form.hbs
+++ b/ui/lib/core/addon/components/choose-pgp-key-form.hbs
@@ -27,6 +27,7 @@
@textToCopy={{this.pgpKey}}
@color="secondary"
@onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
+ @isTruncated={{true}}
data-test-pgp-key-copy
@container="#shamir-flow-modal"
/>
diff --git a/ui/lib/core/addon/components/filter-input-explicit.hbs b/ui/lib/core/addon/components/filter-input-explicit.hbs
new file mode 100644
index 000000000000..2cf1f2ed2935
--- /dev/null
+++ b/ui/lib/core/addon/components/filter-input-explicit.hbs
@@ -0,0 +1,19 @@
+{{!
+ Copyright (c) HashiCorp, Inc.
+ SPDX-License-Identifier: BUSL-1.1
+~}}
+
+
\ No newline at end of file
diff --git a/ui/lib/core/addon/components/shamir/dr-token-flow.hbs b/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
index 82b956f21d34..159bdb2b82c1 100644
--- a/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
+++ b/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
@@ -94,6 +94,7 @@
{{else if this.generateWithPGP}}
+
{{#unless @promptConfig}}
diff --git a/ui/lib/kubernetes/addon/components/page/roles.js b/ui/lib/kubernetes/addon/components/page/roles.js
index 8caf60148e42..6de4188d12c4 100644
--- a/ui/lib/kubernetes/addon/components/page/roles.js
+++ b/ui/lib/kubernetes/addon/components/page/roles.js
@@ -4,35 +4,68 @@
*/
import Component from '@glimmer/component';
-import { inject as service } from '@ember/service';
+import { service } from '@ember/service';
import { action } from '@ember/object';
import { getOwner } from '@ember/application';
import errorMessage from 'vault/utils/error-message';
import { tracked } from '@glimmer/tracking';
+import keys from 'core/utils/key-codes';
/**
* @module Roles
- * RolesPage component is a child component to show list of roles
+ * RolesPage component is a child component to show list of roles.
+ * It also handles the filtering actions of roles.
*
* @param {array} roles - array of roles
* @param {boolean} promptConfig - whether or not to display config cta
- * @param {array} pageFilter - array of filtered roles
+ * @param {string} filterValue - value of queryParam pageFilter
* @param {array} breadcrumbs - breadcrumbs as an array of objects that contain label and route
*/
export default class RolesPageComponent extends Component {
@service flashMessages;
+ @service router;
+ @tracked query;
@tracked roleToDelete = null;
+ constructor() {
+ super(...arguments);
+ this.query = this.args.filterValue;
+ }
+
get mountPoint() {
return getOwner(this).mountPoint;
}
+ navigate(pageFilter) {
+ const route = `${this.mountPoint}.roles.index`;
+ const args = [route, { queryParams: { pageFilter: pageFilter || null } }];
+ this.router.transitionTo(...args);
+ }
+
+ @action
+ handleKeyDown(event) {
+ if (event.keyCode === keys.ESC) {
+ // On escape, transition to roles index route.
+ this.navigate();
+ }
+ // ignore all other key events
+ }
+
+ @action handleInput(evt) {
+ this.query = evt.target.value;
+ }
+
+ @action
+ handleSearch(evt) {
+ evt.preventDefault();
+ this.navigate(this.query);
+ }
+
@action
async onDelete(model) {
try {
const message = `Successfully deleted role ${model.name}`;
await model.destroyRecord();
- this.args.roles.removeObject(model);
this.flashMessages.success(message);
} catch (error) {
const message = errorMessage(error, 'Error deleting role. Please try again or contact support');
diff --git a/ui/lib/kubernetes/addon/components/tab-page-header.hbs b/ui/lib/kubernetes/addon/components/tab-page-header.hbs
index 6b6fc74a31a3..cc765170abcc 100644
--- a/ui/lib/kubernetes/addon/components/tab-page-header.hbs
+++ b/ui/lib/kubernetes/addon/components/tab-page-header.hbs
@@ -28,10 +28,12 @@
{{#if @filterRoles}}
-
{{/if}}
diff --git a/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
index 4a8dcdbdf6fa..ce3690e381ea 100644
--- a/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
+++ b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
@@ -10,6 +10,7 @@ import kubernetesScenario from 'vault/mirage/scenarios/kubernetes';
import kubernetesHandlers from 'vault/mirage/handlers/kubernetes';
import authPage from 'vault/tests/pages/auth';
import { fillIn, visit, currentURL, click, currentRouteName } from '@ember/test-helpers';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Acceptance | kubernetes | roles', function (hooks) {
setupApplicationTest(hooks);
@@ -30,7 +31,8 @@ module('Acceptance | kubernetes | roles', function (hooks) {
test('it should filter roles', async function (assert) {
await this.visitRoles();
assert.dom('[data-test-list-item-link]').exists({ count: 3 }, 'Roles list renders');
- await fillIn('[data-test-component="navigate-input"]', '1');
+ await fillIn(GENERAL.filterInputExplicit, '1');
+ await click(GENERAL.filterInputExplicitSearch);
assert.dom('[data-test-list-item-link]').exists({ count: 1 }, 'Filtered roles list renders');
assert.ok(currentURL().includes('pageFilter=1'), 'pageFilter query param value is set');
});
diff --git a/ui/tests/helpers/clients.js b/ui/tests/helpers/clients.js
index 467ef57603e9..a2bf63cabe5f 100644
--- a/ui/tests/helpers/clients.js
+++ b/ui/tests/helpers/clients.js
@@ -120,7 +120,7 @@ export function overrideResponse(httpStatus, data) {
if (httpStatus === 204) {
return new Response(204, { 'Content-Type': 'application/json' });
}
- return new Response(200, { 'Content-Type': 'application/json' }, JSON.stringify(data));
+ return new Response(httpStatus, { 'Content-Type': 'application/json' }, JSON.stringify(data));
}
export async function dateDropdownSelect(month, year) {
diff --git a/ui/tests/helpers/general-selectors.js b/ui/tests/helpers/general-selectors.js
index ea736f61e08f..badce0204632 100644
--- a/ui/tests/helpers/general-selectors.js
+++ b/ui/tests/helpers/general-selectors.js
@@ -16,6 +16,8 @@ export const SELECTORS = {
tab: (name) => `[data-test-tab="${name}"]`,
filter: (name) => `[data-test-filter="${name}"]`,
filterInput: '[data-test-filter-input]',
+ filterInputExplicit: '[data-test-filter-input-explicit]',
+ filterInputExplicitSearch: '[data-test-filter-input-explicit-search]',
confirmModalInput: '[data-test-confirmation-modal-input]',
confirmButton: '[data-test-confirm-button]',
confirmTrigger: '[data-test-confirm-action-trigger]',
diff --git a/ui/tests/integration/components/choose-pgp-key-form-test.js b/ui/tests/integration/components/choose-pgp-key-form-test.js
index 4e87e3e39526..23838386ae08 100644
--- a/ui/tests/integration/components/choose-pgp-key-form-test.js
+++ b/ui/tests/integration/components/choose-pgp-key-form-test.js
@@ -9,6 +9,17 @@ import { setupRenderingTest } from 'vault/tests/helpers';
import { click, fillIn, render } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';
+const CHOOSE_PGP = {
+ begin: '[data-test-choose-pgp-key-form="begin"]',
+ description: '[data-test-choose-pgp-key-description]',
+ toggle: '[data-test-text-toggle]',
+ useKeyButton: '[data-test-use-pgp-key-button]',
+ pgpTextArea: '[data-test-pgp-file-textarea]',
+ confirm: '[data-test-pgp-key-confirm]',
+ base64Output: '[data-test-pgp-key-copy]',
+ submit: '[data-test-confirm-pgp-key-submit]',
+ cancel: '[data-test-use-pgp-key-cancel]',
+};
module('Integration | Component | choose-pgp-key-form', function (hooks) {
setupRenderingTest(hooks);
@@ -22,25 +33,24 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.description).hasText('my custom form text', 'uses custom form text');
+ await click(CHOOSE_PGP.toggle);
+ assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled');
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled');
+ await click(CHOOSE_PGP.useKeyButton);
assert
- .dom('[data-test-choose-pgp-key-description]')
- .hasText('my custom form text', 'uses custom form text');
- await click('[data-test-text-toggle]');
- assert.dom('[data-test-use-pgp-key-button]').isDisabled('use pgp button is disabled');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- assert.dom('[data-test-use-pgp-key-button]').isNotDisabled('use pgp button is no longer disabled');
- await click('[data-test-use-pgp-key-button]');
- assert
- .dom('[data-test-pgp-key-confirm]')
+ .dom(CHOOSE_PGP.confirm)
.hasText(
'Below is the base-64 encoded PGP Key that will be used. Click the "Do it" button to proceed.',
'Incorporates button text in confirmation'
);
- assert.dom('[data-test-pgp-key-copy]').hasText('base64-pgp-key', 'Shows PGP key contents');
- assert.dom('[data-test-confirm-pgp-key-submit]').hasText('Do it', 'uses passed buttonText');
- await click('[data-test-confirm-pgp-key-submit]');
+ assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents');
+ assert.dom(CHOOSE_PGP.submit).hasText('Do it', 'uses passed buttonText');
+ await click(CHOOSE_PGP.submit);
});
+
test('it calls onSubmit correctly', async function (assert) {
const submitSpy = sinon.spy();
this.set('onSubmit', submitSpy);
@@ -48,24 +58,24 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists');
assert
- .dom('[data-test-choose-pgp-key-description]')
+ .dom(CHOOSE_PGP.description)
.hasText('Choose a PGP Key from your computer or paste the contents of one in the form below.');
- await click('[data-test-text-toggle]');
- assert.dom('[data-test-use-pgp-key-button]').isDisabled('use pgp button is disabled');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- assert.dom('[data-test-use-pgp-key-button]').isNotDisabled('use pgp button is no longer disabled');
- await click('[data-test-use-pgp-key-button]');
+ await click(CHOOSE_PGP.toggle);
+ assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled');
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled');
+ await click(CHOOSE_PGP.useKeyButton);
assert
- .dom('[data-test-pgp-key-confirm]')
+ .dom(CHOOSE_PGP.confirm)
.hasText(
'Below is the base-64 encoded PGP Key that will be used. Click the "Submit" button to proceed.',
'Confirmation text has buttonText'
);
- assert.dom('[data-test-pgp-key-copy]').hasText('base64-pgp-key', 'Shows PGP key contents');
- assert.dom('[data-test-confirm-pgp-key-submit]').hasText('Submit', 'uses passed buttonText');
- await click('[data-test-confirm-pgp-key-submit]');
+ assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents');
+ assert.dom(CHOOSE_PGP.submit).hasText('Submit', 'uses passed buttonText');
+ await click(CHOOSE_PGP.submit);
assert.ok(submitSpy.calledOnceWith('base64-pgp-key'));
});
@@ -76,9 +86,9 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- await click('[data-test-text-toggle]');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- await click('[data-test-use-pgp-key-cancel]');
+ await click(CHOOSE_PGP.toggle);
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ await click(CHOOSE_PGP.cancel);
assert.ok(cancelSpy.calledOnce);
});
});
diff --git a/ui/tests/integration/components/filter-input-explicit-test.js b/ui/tests/integration/components/filter-input-explicit-test.js
new file mode 100644
index 000000000000..5e6e90941f88
--- /dev/null
+++ b/ui/tests/integration/components/filter-input-explicit-test.js
@@ -0,0 +1,61 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render, typeIn, click } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
+import sinon from 'sinon';
+
+const handler = (e) => {
+ // required because filter-input-explicit passes handleSearch on form submit
+ if (e && e.preventDefault) e.preventDefault();
+ return;
+};
+
+module('Integration | Component | filter-input-explicit', function (hooks) {
+ setupRenderingTest(hooks);
+
+ hooks.beforeEach(function () {
+ this.handleSearch = sinon.spy(handler);
+ this.handleInput = sinon.spy();
+ this.handleKeyDown = sinon.spy();
+ this.query = '';
+ this.placeholder = 'Filter roles';
+
+ this.renderComponent = () => {
+ return render(
+ hbs``
+ );
+ };
+ });
+
+ test('it renders', async function (assert) {
+ this.query = 'foo';
+ await this.renderComponent();
+
+ assert
+ .dom(GENERAL.filterInputExplicit)
+ .hasAttribute('placeholder', 'Filter roles', 'Placeholder passed to input element');
+ assert.dom(GENERAL.filterInputExplicit).hasValue('foo', 'Value passed to input element');
+ });
+
+ test('it should call handleSearch on submit', async function (assert) {
+ await this.renderComponent();
+ await typeIn(GENERAL.filterInputExplicit, 'bar');
+ await click(GENERAL.filterInputExplicitSearch);
+ assert.ok(this.handleSearch.calledOnce, 'handleSearch was called once');
+ });
+
+ test('it should send keydown event on keydown', async function (assert) {
+ await this.renderComponent();
+ await typeIn(GENERAL.filterInputExplicit, 'a');
+ await typeIn(GENERAL.filterInputExplicit, 'b');
+
+ assert.ok(this.handleKeyDown.calledTwice, 'handle keydown was called twice');
+ assert.ok(this.handleSearch.notCalled, 'handleSearch was not called on a keydown event');
+ });
+});
diff --git a/ui/tests/integration/components/kubernetes/page/roles-test.js b/ui/tests/integration/components/kubernetes/page/roles-test.js
index 67e9a4b4ca95..afb7d8543762 100644
--- a/ui/tests/integration/components/kubernetes/page/roles-test.js
+++ b/ui/tests/integration/components/kubernetes/page/roles-test.js
@@ -10,6 +10,7 @@ import { setupMirage } from 'ember-cli-mirage/test-support';
import { render, click } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
setupRenderingTest(hooks);
@@ -58,7 +59,7 @@ module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
.dom('[data-test-toolbar-roles-action]')
.doesNotExist('Create role', 'Toolbar action does not render when not configured');
assert
- .dom('[data-test-nav-input]')
+ .dom(GENERAL.filterInputExplicit)
.doesNotExist('Roles filter input does not render when not configured');
assert.dom('[data-test-config-cta]').exists('Config cta renders');
});
@@ -70,7 +71,7 @@ module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
assert
.dom('[data-test-toolbar-roles-action] svg')
.hasClass('flight-icon-plus', 'Toolbar action has correct icon');
- assert.dom('[data-test-nav-input]').exists('Roles filter input renders');
+ assert.dom(GENERAL.filterInputExplicit).exists('Roles filter input renders');
assert.dom('[data-test-empty-state-title]').hasText('No roles yet', 'Title renders');
assert
.dom('[data-test-empty-state-message]')
diff --git a/ui/tests/integration/components/kubernetes/tab-page-header-test.js b/ui/tests/integration/components/kubernetes/tab-page-header-test.js
index fe658242a4c9..9021573b71ee 100644
--- a/ui/tests/integration/components/kubernetes/tab-page-header-test.js
+++ b/ui/tests/integration/components/kubernetes/tab-page-header-test.js
@@ -9,6 +9,8 @@ import { setupEngine } from 'ember-engines/test-support';
import { setupMirage } from 'ember-cli-mirage/test-support';
import { render } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
+import sinon from 'sinon';
module('Integration | Component | kubernetes | TabPageHeader', function (hooks) {
setupRenderingTest(hooks);
@@ -28,12 +30,18 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
this.model = this.store.peekRecord('secret-engine', 'kubernetes-test');
this.mount = this.model.path.slice(0, -1);
this.breadcrumbs = [{ label: 'secrets', route: 'secrets', linkExternal: true }, { label: this.mount }];
+ this.handleSearch = sinon.spy();
+ this.handleInput = sinon.spy();
+ this.handleKeyDown = sinon.spy();
});
test('it should render breadcrumbs', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert.dom('[data-test-breadcrumbs] li:nth-child(1) a').hasText('secrets', 'Secrets breadcrumb renders');
assert
@@ -42,9 +50,12 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
});
test('it should render title', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert
.dom('[data-test-header-title] svg')
.hasClass('flight-icon-kubernetes-color', 'Correct icon renders in title');
@@ -52,9 +63,12 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
});
test('it should render tabs', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert.dom('[data-test-tab="overview"]').hasText('Overview', 'Overview tab renders');
assert.dom('[data-test-tab="roles"]').hasText('Roles', 'Roles tab renders');
assert.dom('[data-test-tab="config"]').hasText('Configuration', 'Configuration tab renders');
@@ -62,16 +76,16 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
test('it should render filter for roles', async function (assert) {
await render(
- hbs``,
+ hbs``,
{ owner: this.engine }
);
- assert.dom('[data-test-nav-input] input').hasValue('test', 'Filter renders with provided value');
+ assert.dom(GENERAL.filterInputExplicit).hasValue('test', 'Filter renders with provided value');
});
test('it should yield block for toolbar actions', async function (assert) {
await render(
hbs`
-
+
It yields!
`,
diff --git a/ui/tests/integration/components/shamir/dr-token-flow-test.js b/ui/tests/integration/components/shamir/dr-token-flow-test.js
index 2cbb76bf25e9..6eb6fcc82c12 100644
--- a/ui/tests/integration/components/shamir/dr-token-flow-test.js
+++ b/ui/tests/integration/components/shamir/dr-token-flow-test.js
@@ -6,9 +6,11 @@
import sinon from 'sinon';
import { module, test } from 'qunit';
import { setupRenderingTest } from 'vault/tests/helpers';
-import { click, fillIn, render } from '@ember/test-helpers';
+import { click, fillIn, render, waitFor } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';
import { setupMirage } from 'ember-cli-mirage/test-support';
+import { overrideResponse } from 'vault/tests/helpers/clients';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Integration | Component | shamir/dr-token-flow', function (hooks) {
setupRenderingTest(hooks);
@@ -151,6 +153,25 @@ module('Integration | Component | shamir/dr-token-flow', function (hooks) {
assert.dom('[data-test-dr-token-flow-step="shamir"]').exists('Renders shamir step after PGP key chosen');
});
+ test('it shows error with pgp key', async function (assert) {
+ assert.expect(2);
+ this.server.get('/sys/replication/dr/secondary/generate-operation-token/attempt', function () {
+ return {};
+ });
+ this.server.post('/sys/replication/dr/secondary/generate-operation-token/attempt', () =>
+ overrideResponse(400, { errors: ['error parsing PGP key'] })
+ );
+ await render(hbs``);
+ await click('[data-test-use-pgp-key-cta]');
+ assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP form shows');
+ await click('[data-test-text-toggle]');
+ await fillIn('[data-test-pgp-file-textarea]', 'some-key-here');
+ await click('[data-test-use-pgp-key-button]');
+ await click('[data-test-confirm-pgp-key-submit]');
+ await waitFor(GENERAL.messageError);
+ assert.dom(GENERAL.messageError).hasText('Error error parsing PGP key');
+ });
+
test('it cancels correctly when generation not started', async function (assert) {
assert.expect(2);
const cancelSpy = sinon.spy();
diff --git a/vault/activity_log.go b/vault/activity_log.go
index e4836c4e30ea..5fc6d1fed0dc 100644
--- a/vault/activity_log.go
+++ b/vault/activity_log.go
@@ -12,7 +12,6 @@ import (
"io"
"net/http"
"os"
- "path"
"sort"
"strconv"
"strings"
@@ -267,11 +266,20 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me
precomputedQueryWritten: make(chan struct{}),
}
- config, err := a.loadConfigOrDefault(core.activeContext, core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(core.activeContext)
if err != nil {
return nil, err
}
+ // check if the retention time is less than the default in storage when reporting is enabled to support upgrades
+ if (config.RetentionMonths < ActivityLogMinimumRetentionMonths) && core.ManualLicenseReportingEnabled() {
+ updatedConfig, err := a.setDefaultRetentionMonthsInConfig(core.activeContext, config)
+ if err != nil {
+ return nil, err
+ }
+ config = updatedConfig
+ }
+
a.SetConfigInit(config)
a.queryStore = activity.NewPrecomputedQueryStore(
@@ -1902,7 +1910,7 @@ func defaultActivityConfig() activityConfig {
}
}
-func (a *ActivityLog) loadConfigOrDefault(ctx context.Context, isReportingEnabled bool) (activityConfig, error) {
+func (a *ActivityLog) loadConfigOrDefault(ctx context.Context) (activityConfig, error) {
// Load from storage
var config activityConfig
configRaw, err := a.view.Get(ctx, activityConfigKey)
@@ -1916,34 +1924,26 @@ func (a *ActivityLog) loadConfigOrDefault(ctx context.Context, isReportingEnable
if err := configRaw.DecodeJSON(&config); err != nil {
return config, err
}
-
- // check if the retention time is lesser than the default when reporting is enabled
- if (config.RetentionMonths < ActivityLogMinimumRetentionMonths) && isReportingEnabled {
- updatedConfig, err := a.setDefaultRetentionMonthsInConfig(ctx, config)
- if err != nil {
- return config, err
- }
- return updatedConfig, nil
- }
return config, nil
}
// setDefaultRetentionMonthsInConfig sets the retention months in activity config with default value.
// This supports upgrades from versions prior to set the new default ActivityLogMinimumRetentionMonths.
func (a *ActivityLog) setDefaultRetentionMonthsInConfig(ctx context.Context, inputConfig activityConfig) (activityConfig, error) {
+ if a.core.perfStandby {
+ return inputConfig, nil
+ }
+
inputConfig.RetentionMonths = ActivityLogMinimumRetentionMonths
// Store the config
- entry, err := logical.StorageEntryJSON(path.Join(activitySubPath, activityConfigKey), inputConfig)
+ entry, err := logical.StorageEntryJSON(activityConfigKey, inputConfig)
if err != nil {
return inputConfig, err
}
if err := a.view.Put(ctx, entry); err != nil {
return inputConfig, err
}
-
- // Set the new config on the activity log
- a.SetConfig(ctx, inputConfig)
return inputConfig, nil
}
diff --git a/vault/activity_log_util.go b/vault/activity_log_util.go
index a3a9d2b9c1b3..890af5533fad 100644
--- a/vault/activity_log_util.go
+++ b/vault/activity_log_util.go
@@ -7,13 +7,9 @@ package vault
import (
"context"
- "time"
)
// sendCurrentFragment is a no-op on OSS
func (a *ActivityLog) sendCurrentFragment(ctx context.Context) error {
return nil
}
-
-// CensusReport is a no-op on OSS
-func (a *ActivityLog) CensusReport(context.Context, CensusReporter, time.Time) {}
diff --git a/vault/identity_store.go b/vault/identity_store.go
index c10edf7ad368..8d53f4c35682 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -6,6 +6,7 @@ package vault
import (
"context"
"fmt"
+ "reflect"
"strings"
"time"
@@ -24,6 +25,7 @@ import (
"github.com/hashicorp/vault/sdk/helper/locksutil"
"github.com/hashicorp/vault/sdk/logical"
"github.com/patrickmn/go-cache"
+ "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
@@ -621,316 +623,453 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
defer i.lock.Unlock()
switch {
- // Check if the key is a storage entry key for an entity bucket
case strings.HasPrefix(key, storagepacker.StoragePackerBucketsPrefix):
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Each entity object in MemDB holds the MD5 hash of the storage
- // entry key of the entity bucket. Fetch all the entities that
- // belong to this bucket using the hash value. Remove these entities
- // from MemDB along with all the aliases of each entity.
- entitiesFetched, err := i.MemDBEntitiesByBucketKeyInTxn(txn, key)
- if err != nil {
- i.logger.Error("failed to fetch entities using the bucket key", "key", key)
- return
- }
+ // key is for an entity bucket in storage.
+ i.invalidateEntityBucket(ctx, key)
+ case strings.HasPrefix(key, groupBucketsPrefix):
+ // key is for a group bucket in storage.
+ i.invalidateGroupBucket(ctx, key)
+ case strings.HasPrefix(key, oidcTokensPrefix):
+ // key is for oidc tokens in storage.
+ i.invalidateOIDCToken(ctx)
+ case strings.HasPrefix(key, clientPath):
+ // key is for a client in storage.
+ i.invalidateClientPath(ctx, key)
+ case strings.HasPrefix(key, localAliasesBucketsPrefix):
+ // key is for a local alias bucket in storage.
+ i.invalidateLocalAliasesBucket(ctx, key)
+ }
+}
+
+func (i *IdentityStore) invalidateEntityBucket(ctx context.Context, key string) {
+ txn := i.db.Txn(true)
+ defer txn.Abort()
- for _, entity := range entitiesFetched {
- // Delete all the aliases in the entity. This function will also remove
- // the corresponding alias indexes too.
- err = i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases)
+ // The handling of entities has the added quirk of dealing with a temporary
+ // copy of the entity written in storage on the active node of performance
+ // secondary clusters. These temporary entity entries in storage must be
+ // removed once the actual entity appears in the storage bucket (as
+ // replicated from the primary cluster).
+ //
+ // This function retrieves all entities from MemDB that have a corresponding
+ // storage key that matches the provided key to invalidate. This is the set
+ // of entities that need to be updated, removed, or left alone in MemDB.
+ //
+ // The logic iterates over every entity stored in the invalidated storage
+ // bucket. For each entity read from the storage bucket, the set of entities
+ // read from MemDB is searched for the same entity. If it can't be found,
+ // it means that it needs to be inserted into MemDB. On the other hand, if
+ // the entity is found, the storage bucket entity is compared to the
+ // MemDB entity. If they do not match, then the storage entity state needs
+ // to be used to update the MemDB entity; if they did match, then it means
+ // that the MemDB entity can be left alone. As each MemDB entity is
+ // processed in the loop, it is removed from the set of MemDB entities.
+ //
+ // Once all entities from the storage bucket have been compared to those
+ // retrieved from MemDB, the remaining entities from the set retrieved from
+ // MemDB are those that have been deleted from storage and must be removed
+ // from MemDB (because as MemDB entities that match a storage bucket
+ // entity were processed, they were removed from the set).
+ memDBEntities, err := i.MemDBEntitiesByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch entities using the bucket key", "key", key)
+ return
+ }
+
+ bucket, err := i.entityPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh entities", "key", key, "error", err)
+ return
+ }
+
+ if bucket != nil {
+ // The storage entry for the entity bucket exists, so we need to compare
+ // the entities in that bucket with those in MemDB and only update those
+ // that are different. The entities in the bucket storage entry are the
+ // source of truth.
+
+ // Iterate over each entity item from the bucket
+ for _, item := range bucket.Items {
+ bucketEntity, err := i.parseEntityFromBucketItem(ctx, item)
if err != nil {
- i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
+ i.logger.Error("failed to parse entity from bucket entry item", "error", err)
return
}
- // Delete the entity using the same transaction
- err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID)
+ localAliases, err := i.parseLocalAliases(bucketEntity.ID)
if err != nil {
- i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
+ i.logger.Error("failed to load local aliases from storage", "error", err)
return
}
- }
- // Get the storage bucket entry
- bucket, err := i.entityPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh entities", "key", key, "error", err)
- return
- }
-
- // If the underlying entry is nil, it means that this invalidation
- // notification is for the deletion of the underlying storage entry. At
- // this point, since all the entities belonging to this bucket are
- // already removed, there is nothing else to be done. But, if the
- // storage entry is non-nil, its an indication of an update. In this
- // case, entities in the updated bucket needs to be reinserted into
- // MemDB.
- var entityIDs []string
- if bucket != nil {
- entityIDs = make([]string, 0, len(bucket.Items))
- for _, item := range bucket.Items {
- entity, err := i.parseEntityFromBucketItem(ctx, item)
- if err != nil {
- i.logger.Error("failed to parse entity from bucket entry item", "error", err)
- return
+ if localAliases != nil {
+ for _, alias := range localAliases.Aliases {
+ bucketEntity.UpsertAlias(alias)
}
+ }
- localAliases, err := i.parseLocalAliases(entity.ID)
- if err != nil {
- i.logger.Error("failed to load local aliases from storage", "error", err)
- return
- }
- if localAliases != nil {
- for _, alias := range localAliases.Aliases {
- entity.UpsertAlias(alias)
- }
+ var memDBEntity *identity.Entity
+ for i, entity := range memDBEntities {
+ if entity.ID == bucketEntity.ID {
+ memDBEntity = entity
+
+ // Remove this processed entity from the slice, so that
+ // all that will be left are unprocessed entities.
+ copy(memDBEntities[i:], memDBEntities[i+1:])
+ memDBEntities = memDBEntities[:len(memDBEntities)-1]
+ break
}
+ }
+
+ // If the entity is not in MemDB or if it is but differs from the
+ // state that's in the bucket storage entry, upsert it into MemDB.
- // Only update MemDB and don't touch the storage
- err = i.upsertEntityInTxn(ctx, txn, entity, nil, false)
+ // We've considered the use of github.com/google/go-cmp here,
+ // but opted for sticking with reflect.DeepEqual because go-cmp
+ // is intended for testing and is able to panic in some
+ // situations.
+ if memDBEntity == nil || !reflect.DeepEqual(memDBEntity, bucketEntity) {
+ // The entity is new or has changed in storage. Upsert it into MemDB.
+ err = i.upsertEntityInTxn(ctx, txn, bucketEntity, nil, false)
if err != nil {
- i.logger.Error("failed to update entity in MemDB", "error", err)
+ i.logger.Error("failed to update entity in MemDB", "entity_id", bucketEntity.ID, "error", err)
return
}
- // If we are a secondary, the entity created by the secondary
- // via the CreateEntity RPC would have been cached. Now that the
- // invalidation of the same has hit, there is no need of the
- // cache. Clearing the cache. Writing to storage can't be
- // performed by perf standbys. So only doing this in the active
- // node of the secondary.
+ // If this is a performance secondary, the entity created on
+ // this node would have been cached in a local cache based on
+ // the result of the CreateEntity RPC call to the primary
+ // cluster. Since this invalidation is signaling that the
+ // entity is now in the primary cluster's storage, the locally
+ // cached entry can be removed.
if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active {
- if err := i.localAliasPacker.DeleteItem(ctx, entity.ID+tmpSuffix); err != nil {
- i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", entity.ID)
+ if err := i.localAliasPacker.DeleteItem(ctx, bucketEntity.ID+tmpSuffix); err != nil {
+ i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", bucketEntity.ID)
return
}
}
-
- entityIDs = append(entityIDs, entity.ID)
}
}
+ }
+
+ // Any entities that are still in the memDBEntities slice are ones that do
+ // not exist in the bucket storage entry. These entities have to be removed
+ // from MemDB.
+ for _, memDBEntity := range memDBEntities {
+ err = i.deleteAliasesInEntityInTxn(txn, memDBEntity, memDBEntity.Aliases)
+ if err != nil {
+ i.logger.Error("failed to delete aliases in entity", "entity_id", memDBEntity.ID, "error", err)
+ return
+ }
+
+ err = i.MemDBDeleteEntityByIDInTxn(txn, memDBEntity.ID)
+ if err != nil {
+ i.logger.Error("failed to delete entity from MemDB", "entity_id", memDBEntity.ID, "error", err)
+ return
+ }
- // entitiesFetched are the entities before invalidation. entityIDs
- // represent entities that are valid after invalidation. Clear the
- // storage entries of local aliases for those entities that are
- // indicated deleted by this invalidation.
+ // In addition, if this is an active node of a performance secondary
+ // cluster, remove the local alias storage entry for this deleted entity.
if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active {
- for _, entity := range entitiesFetched {
- if !strutil.StrListContains(entityIDs, entity.ID) {
- if err := i.localAliasPacker.DeleteItem(ctx, entity.ID); err != nil {
- i.logger.Error("failed to clear local alias for entity", "error", err, "entity_id", entity.ID)
- return
- }
- }
+ if err := i.localAliasPacker.DeleteItem(ctx, memDBEntity.ID); err != nil {
+ i.logger.Error("failed to clear local alias for entity", "error", err, "entity_id", memDBEntity.ID)
+ return
}
}
+ }
- txn.Commit()
- return
+ txn.Commit()
+}
- // Check if the key is a storage entry key for an group bucket
- // For those entities that are deleted, clear up the local alias entries
- case strings.HasPrefix(key, groupBucketsPrefix):
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
+func (i *IdentityStore) invalidateGroupBucket(ctx context.Context, key string) {
+ // Create a MemDB transaction
+ txn := i.db.Txn(true)
+ defer txn.Abort()
+
+ groupsFetched, err := i.MemDBGroupsByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch groups using the bucket key", "key", key)
+ return
+ }
- groupsFetched, err := i.MemDBGroupsByBucketKeyInTxn(txn, key)
+ for _, group := range groupsFetched {
+ // Delete the group using the same transaction
+ err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
if err != nil {
- i.logger.Error("failed to fetch groups using the bucket key", "key", key)
+ i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
return
}
- for _, group := range groupsFetched {
- // Delete the group using the same transaction
- err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
+ if group.Alias != nil {
+ err := i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
if err != nil {
- i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
+ i.logger.Error("failed to delete group alias from MemDB", "error", err)
return
}
+ }
+ }
+
+ // Get the storage bucket entry
+ bucket, err := i.groupPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh group", "key", key, "error", err)
+ return
+ }
- if group.Alias != nil {
- err := i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
+ if bucket != nil {
+ for _, item := range bucket.Items {
+ group, err := i.parseGroupFromBucketItem(item)
+ if err != nil {
+ i.logger.Error("failed to parse group from bucket entry item", "error", err)
+ return
+ }
+
+ // Before updating the group, check if the group exists. If it
+ // does, then delete the group alias from memdb, for the
+ // invalidation would have sent an update.
+ groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
+ if err != nil {
+ i.logger.Error("failed to fetch group from MemDB", "error", err)
+ return
+ }
+
+ // If the group has an alias remove it from memdb
+ if groupFetched != nil && groupFetched.Alias != nil {
+ err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
if err != nil {
- i.logger.Error("failed to delete group alias from MemDB", "error", err)
+ i.logger.Error("failed to delete old group alias from MemDB", "error", err)
return
}
}
- }
- // Get the storage bucket entry
- bucket, err := i.groupPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh group", "key", key, "error", err)
- return
+ // Only update MemDB and don't touch the storage
+ err = i.UpsertGroupInTxn(ctx, txn, group, false)
+ if err != nil {
+ i.logger.Error("failed to update group in MemDB", "error", err)
+ return
+ }
}
+ }
- if bucket != nil {
- for _, item := range bucket.Items {
- group, err := i.parseGroupFromBucketItem(item)
- if err != nil {
- i.logger.Error("failed to parse group from bucket entry item", "error", err)
- return
- }
+ txn.Commit()
+}
- // Before updating the group, check if the group exists. If it
- // does, then delete the group alias from memdb, for the
- // invalidation would have sent an update.
- groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
- if err != nil {
- i.logger.Error("failed to fetch group from MemDB", "error", err)
- return
- }
+// invalidateOIDCToken is called by the Invalidate function to handle the
+// invalidation of an OIDC token storage entry.
+func (i *IdentityStore) invalidateOIDCToken(ctx context.Context) {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ i.logger.Error("error retrieving namespace", "error", err)
+ return
+ }
- // If the group has an alias remove it from memdb
- if groupFetched != nil && groupFetched.Alias != nil {
- err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
- if err != nil {
- i.logger.Error("failed to delete old group alias from MemDB", "error", err)
- return
- }
- }
+ // Wipe the cache for the requested namespace. This will also clear
+ // the shared namespace as well.
+ if err := i.oidcCache.Flush(ns); err != nil {
+ i.logger.Error("error flushing oidc cache", "error", err)
+ return
+ }
+}
- // Only update MemDB and don't touch the storage
- err = i.UpsertGroupInTxn(ctx, txn, group, false)
- if err != nil {
- i.logger.Error("failed to update group in MemDB", "error", err)
- return
- }
- }
- }
+// invalidateClientPath is called by the Invalidate function to handle the
+// invalidation of a client path storage entry.
+func (i *IdentityStore) invalidateClientPath(ctx context.Context, key string) {
+ name := strings.TrimPrefix(key, clientPath)
- txn.Commit()
+ // Invalidate the cached client in memdb
+ if err := i.memDBDeleteClientByName(ctx, name); err != nil {
+ i.logger.Error("error invalidating client", "error", err, "key", key)
return
+ }
+}
- case strings.HasPrefix(key, oidcTokensPrefix):
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- i.logger.Error("error retrieving namespace", "error", err)
- return
- }
+// invalidateLocalAliasesBucket is called by the Invalidate function to handle the
+// invalidation of a local alias bucket storage entry.
+func (i *IdentityStore) invalidateLocalAliasesBucket(ctx context.Context, key string) {
+ // This invalidation only happens on performance standby servers
- // Wipe the cache for the requested namespace. This will also clear
- // the shared namespace as well.
- if err := i.oidcCache.Flush(ns); err != nil {
- i.logger.Error("error flushing oidc cache", "error", err)
- }
- case strings.HasPrefix(key, clientPath):
- name := strings.TrimPrefix(key, clientPath)
+ // Create a MemDB transaction and abort it once this function returns
+ txn := i.db.Txn(true)
+ defer txn.Abort()
- // Invalidate the cached client in memdb
- if err := i.memDBDeleteClientByName(ctx, name); err != nil {
- i.logger.Error("error invalidating client", "error", err, "key", key)
- return
- }
- case strings.HasPrefix(key, localAliasesBucketsPrefix):
- //
- // This invalidation only happens on perf standbys
- //
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Find all the local aliases belonging to this bucket and remove it
- // both from aliases table and entities table. We will add the local
- // aliases back by parsing the storage key. This way the deletion
- // invalidation gets handled.
- aliases, err := i.MemDBLocalAliasesByBucketKeyInTxn(txn, key)
- if err != nil {
- i.logger.Error("failed to fetch entities using the bucket key", "key", key)
- return
- }
+ // Local aliases have the added complexity of being associated with
+ // entities. Whenever a local alias is updated or inserted into MemDB, its
+ // associated MemDB-stored entity must also be updated.
+ //
+ // This function retrieves all local aliases that have a corresponding
+ // storage key that matches the provided key to invalidate. This is the
+ // set of local aliases that need to be updated, removed, or left
+ // alone in MemDB. Each of these operations is done as its own MemDB
+ // operation, but the corresponding changes that need to be made to the
+ // associated entities can be batched together to cut down on the number of
+ // MemDB operations.
+ //
+ // The logic iterates over every local alias stored at the invalidated key.
+ // For each local alias read from the storage entry, the set of local
+ // aliases read from MemDB is searched for the same local alias. If it can't
+ // be found, it means that it needs to be inserted into MemDB. However, if
+ // it's found, it must be compared with the local alias from the storage. If
+ // they don't match, it means that the local alias in MemDB needs to be
+ // updated. If they did match, it means that this particular local alias did
+ // not change in storage, so nothing further needs to be done. Each local
+ // alias processed in this loop is removed from the set of retrieved local
+ // aliases. The local alias is also added to the map tracking local aliases
+ // that need to be upserted in their associated entities in MemDB.
+ //
+ // Once the code is done iterating over all of the local aliases from
+ // storage, any local aliases still in the set retrieved from MemDB
+ // corresponds to a local alias that is no longer in storage and must be
+ // removed from MemDB. These local aliases are added to the map tracking
+ // local aliases to remove from their entities in MemDB. The actual removal
+ // of the local aliases themselves is done as part of the tidying up of the
+ // associated entities, described below.
+ //
+ // In order to batch the changes to the associated entities, a map of entity
+ // to local aliases (slice of local alias) is built up in the loop that
+ // iterates over the local aliases from storage. Similarly, the code that
+ // detects which local aliases to remove from MemDB also builds a separate
+ // map of entity to local aliases (slice of local alias). Each element in
+ // the map of local aliases to update in their entity is processed as
+ // follows: the mapped slice of local aliases is iterated over and each
+ // local alias is upserted into the entity and then the entity itself is
+ // upserted. Then, each element in the map of local aliases to remove from
+ // their entity is processed: the aliases are deleted from both the entity and MemDB, and the entity itself is re-upserted.
+
+ // Get all cached local aliases to compare with invalidated bucket
+ memDBLocalAliases, err := i.MemDBLocalAliasesByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch local aliases using the bucket key", "key", key, "error", err)
+ return
+ }
- for _, alias := range aliases {
- entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, true)
- if err != nil {
- i.logger.Error("failed to fetch entity during local alias invalidation", "entity_id", alias.CanonicalID, "error", err)
- return
- }
- if entity == nil {
- i.logger.Error("failed to fetch entity during local alias invalidation, missing entity", "entity_id", alias.CanonicalID, "error", err)
+ // Get local aliases from the invalidated bucket
+ bucket, err := i.localAliasPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh local aliases", "key", key, "error", err)
+ return
+ }
+
+ // This map tracks the set of local aliases that need to be updated in each
+ // affected entity in MemDB.
+ entityLocalAliasesToUpsert := map[*identity.Entity][]*identity.Alias{}
+
+ // This map tracks the set of local aliases that need to be removed from
+ // their affected entity in MemDB, as well as removing the local alias
+ // themselves.
+ entityLocalAliasesToRemove := map[*identity.Entity][]*identity.Alias{}
+
+ if bucket != nil {
+ // The storage entry for the local alias bucket exists, so we need to
+ // compare the local aliases in that bucket with those in MemDB and only
+ // update those that are different. The local aliases in the bucket are
+ // the source of truth.
+
+ // Iterate over each local alias item from the bucket
+ for _, item := range bucket.Items {
+ if strings.HasSuffix(item.ID, tmpSuffix) {
continue
}
- // Delete local aliases from the entity.
- err = i.deleteAliasesInEntityInTxn(txn, entity, []*identity.Alias{alias})
- if err != nil {
- i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
- return
- }
+ var bucketLocalAliases identity.LocalAliases
- // Update the entity with removed alias.
- if err := i.MemDBUpsertEntityInTxn(txn, entity); err != nil {
- i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
+ err = anypb.UnmarshalTo(item.Message, &bucketLocalAliases, proto.UnmarshalOptions{})
+ if err != nil {
+ i.logger.Error("failed to parse local aliases during invalidation", "item_id", item.ID, "error", err)
return
}
- }
- // Now read the invalidated storage key
- bucket, err := i.localAliasPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh local aliases", "key", key, "error", err)
- return
- }
- if bucket != nil {
- for _, item := range bucket.Items {
- if strings.HasSuffix(item.ID, tmpSuffix) {
- continue
- }
-
- var localAliases identity.LocalAliases
- err = ptypes.UnmarshalAny(item.Message, &localAliases)
- if err != nil {
- i.logger.Error("failed to parse local aliases during invalidation", "error", err)
+ for _, bucketLocalAlias := range bucketLocalAliases.Aliases {
+ // Find the entity related to bucketLocalAlias in MemDB in order
+ // to track any local aliases modifications that must be made in
+ // this entity.
+ memDBEntity := i.FetchEntityForLocalAliasInTxn(txn, bucketLocalAlias)
+ if memDBEntity == nil {
+ // FetchEntityForLocalAliasInTxn already logs any error
return
}
- for _, alias := range localAliases.Aliases {
- // Add to the aliases table
- if err := i.MemDBUpsertAliasInTxn(txn, alias, false); err != nil {
- i.logger.Error("failed to insert local alias to memdb during invalidation", "error", err)
- return
+
+ // memDBLocalAlias starts off nil but gets set to the local
+ // alias from memDBLocalAliases whose ID matches the ID of
+ // bucketLocalAlias.
+ var memDBLocalAlias *identity.Alias
+ for i, localAlias := range memDBLocalAliases {
+ if localAlias.ID == bucketLocalAlias.ID {
+ memDBLocalAlias = localAlias
+
+ // Remove this processed local alias from the
+ // memDBLocalAliases slice, so that all that
+ // will be left are unprocessed local aliases.
+ copy(memDBLocalAliases[i:], memDBLocalAliases[i+1:])
+ memDBLocalAliases = memDBLocalAliases[:len(memDBLocalAliases)-1]
+
+ break
}
+ }
- // Fetch the associated entity and add the alias to that too.
- entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, false)
+ // We've considered the use of github.com/google/go-cmp here,
+ // but opted for sticking with reflect.DeepEqual because go-cmp
+ // is intended for testing and is able to panic in some
+ // situations.
+ if memDBLocalAlias == nil || !reflect.DeepEqual(memDBLocalAlias, bucketLocalAlias) {
+ // The bucketLocalAlias is not in MemDB or it has changed in
+ // storage.
+ err = i.MemDBUpsertAliasInTxn(txn, bucketLocalAlias, false)
if err != nil {
- i.logger.Error("failed to fetch entity during local alias invalidation", "error", err)
+ i.logger.Error("failed to update local alias in MemDB", "alias_id", bucketLocalAlias.ID, "error", err)
return
}
- if entity == nil {
- cachedEntityItem, err := i.localAliasPacker.GetItem(alias.CanonicalID + tmpSuffix)
- if err != nil {
- i.logger.Error("failed to fetch cached entity", "key", key, "error", err)
- return
- }
- if cachedEntityItem != nil {
- entity, err = i.parseCachedEntity(cachedEntityItem)
- if err != nil {
- i.logger.Error("failed to parse cached entity", "key", key, "error", err)
- return
- }
- }
- }
- if entity == nil {
- i.logger.Error("received local alias invalidation for an invalid entity", "item.ID", item.ID)
- return
- }
- entity.UpsertAlias(alias)
- // Update the entities table
- if err := i.MemDBUpsertEntityInTxn(txn, entity); err != nil {
- i.logger.Error("failed to upsert entity during local alias invalidation", "error", err)
- return
- }
+ // Add this local alias to the set of local aliases that
+ // need to be updated for memDBEntity.
+ entityLocalAliasesToUpsert[memDBEntity] = append(entityLocalAliasesToUpsert[memDBEntity], bucketLocalAlias)
}
}
}
- txn.Commit()
- return
}
+
+ // Any local aliases still remaining in memDBLocalAliases do not exist in
+ // storage and should be removed from MemDB.
+ for _, memDBLocalAlias := range memDBLocalAliases {
+ memDBEntity := i.FetchEntityForLocalAliasInTxn(txn, memDBLocalAlias)
+ if memDBEntity == nil {
+ // FetchEntityForLocalAliasInTxn already logs any error
+ return
+ }
+
+ entityLocalAliasesToRemove[memDBEntity] = append(entityLocalAliasesToRemove[memDBEntity], memDBLocalAlias)
+ }
+
+ // Now process the entityLocalAliasesToUpsert map.
+ for entity, localAliases := range entityLocalAliasesToUpsert {
+ for _, localAlias := range localAliases {
+ entity.UpsertAlias(localAlias)
+ }
+
+ err = i.MemDBUpsertEntityInTxn(txn, entity)
+ if err != nil {
+ i.logger.Error("failed to update entity in MemDB", "entity_id", entity.ID, "error", err)
+ return
+ }
+ }
+
+ // Finally process the entityLocalAliasesToRemove map.
+ for entity, localAliases := range entityLocalAliasesToRemove {
+ // The deleteAliasesInEntityInTxn removes the provided aliases from
+ // the entity, but it also removes the aliases themselves from MemDB.
+ err := i.deleteAliasesInEntityInTxn(txn, entity, localAliases)
+ if err != nil {
+ i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
+ return
+ }
+
+ err = i.MemDBUpsertEntityInTxn(txn, entity)
+ if err != nil {
+ i.logger.Error("failed to update entity in MemDB", "entity_id", entity.ID, "error", err)
+ return
+ }
+ }
+
+ txn.Commit()
}
func (i *IdentityStore) parseLocalAliases(entityID string) (*identity.LocalAliases, error) {
diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go
index 9ed4659b8d27..7c826dfa0c33 100644
--- a/vault/identity_store_test.go
+++ b/vault/identity_store_test.go
@@ -18,6 +18,7 @@ import (
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/storagepacker"
"github.com/hashicorp/vault/sdk/logical"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/anypb"
)
@@ -912,3 +913,217 @@ func TestIdentityStore_DeleteCaseSensitivityKey(t *testing.T) {
t.Fatalf("bad: expected no entry for casesensitivity key")
}
}
+
+// TestIdentityStoreInvalidate_Entities verifies the proper handling of
+// entities in the Invalidate method.
+func TestIdentityStoreInvalidate_Entities(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ id, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ entity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: id,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(id),
+ }
+
+ p := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ entityAsAny, err := anypb.New(entity)
+ require.NoError(t, err)
+
+ item := &storagepacker.Item{
+ ID: id,
+ Message: entityAsAny,
+ }
+
+ err = p.PutItem(context.Background(), item)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memEntity)
+
+ txn.Commit()
+
+ // Modify the entity in storage then call the Invalidate function
+ entity.Metadata = make(map[string]string)
+ entity.Metadata["foo"] = "bar"
+
+ entityAsAny, err = anypb.New(entity)
+ require.NoError(t, err)
+
+ item.Message = entityAsAny
+
+ p.PutItem(context.Background(), item)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn = c.identityStore.db.Txn(true)
+
+ memEntity, err = c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.Contains(t, memEntity.Metadata, "foo")
+
+ txn.Commit()
+
+ // Delete the entity in storage then call the Invalidate function
+ err = p.DeleteItem(context.Background(), id)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn = c.identityStore.db.Txn(true)
+
+ memEntity, err = c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.Nil(t, memEntity)
+
+ txn.Commit()
+}
+
+// TestIdentityStoreInvalidate_LocalAliasesWithEntity verifies the correct
+// handling of local aliases in the Invalidate method.
+func TestIdentityStoreInvalidate_LocalAliasesWithEntity(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ entityID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ entity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: entityID,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(entityID),
+ }
+
+ aliasID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ localAliases := &identity.LocalAliases{
+ Aliases: []*identity.Alias{
+ {
+ ID: aliasID,
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ CanonicalID: entityID,
+ MountAccessor: "userpass-000000",
+ },
+ },
+ }
+
+ ep := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ entityAsAny, err := anypb.New(entity)
+ require.NoError(t, err)
+
+ entityItem := &storagepacker.Item{
+ ID: entityID,
+ Message: entityAsAny,
+ }
+
+ err = ep.PutItem(context.Background(), entityItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), ep.BucketKey(entityID))
+
+ lap := c.identityStore.localAliasPacker
+
+ localAliasesAsAny, err := anypb.New(localAliases)
+ require.NoError(t, err)
+
+ localAliasesItem := &storagepacker.Item{
+ ID: entityID,
+ Message: localAliasesAsAny,
+ }
+
+ err = lap.PutItem(context.Background(), localAliasesItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), lap.BucketKey(entityID))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memDBEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, entityID, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBEntity)
+
+ memDBLocalAlias, err := c.identityStore.MemDBAliasByIDInTxn(txn, aliasID, true, false)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBLocalAlias)
+ assert.Equal(t, 1, len(memDBEntity.Aliases))
+ assert.NotNil(t, memDBEntity.Aliases[0])
+ assert.Equal(t, memDBEntity.Aliases[0].ID, memDBLocalAlias.ID)
+
+ txn.Commit()
+}
+
+// TestIdentityStoreInvalidate_TemporaryEntity verifies the proper handling of
+// temporary entities in the Invalidate method.
+func TestIdentityStoreInvalidate_TemporaryEntity(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ entityID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ tempEntity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: entityID,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(entityID),
+ }
+
+ lap := c.identityStore.localAliasPacker
+ ep := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ tempEntityAsAny, err := anypb.New(tempEntity)
+ require.NoError(t, err)
+
+ tempEntityItem := &storagepacker.Item{
+ ID: entityID + tmpSuffix,
+ Message: tempEntityAsAny,
+ }
+
+ err = lap.PutItem(context.Background(), tempEntityItem)
+ require.NoError(t, err)
+
+ entityAsAny := tempEntityAsAny
+
+ entityItem := &storagepacker.Item{
+ ID: entityID,
+ Message: entityAsAny,
+ }
+
+ err = ep.PutItem(context.Background(), entityItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), ep.BucketKey(entityID))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memDBEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, entityID, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBEntity)
+
+ item, err := lap.GetItem(lap.BucketKey(entityID) + tmpSuffix)
+ assert.NoError(t, err)
+ assert.Nil(t, item)
+}
diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go
index 6d9190cbe293..c78db0bc70f7 100644
--- a/vault/identity_store_util.go
+++ b/vault/identity_store_util.go
@@ -1269,6 +1269,36 @@ func (i *IdentityStore) MemDBDeleteEntityByID(entityID string) error {
return nil
}
+// FetchEntityForLocalAliasInTxn fetches the entity associated with the provided
+// local identity.Alias. MemDB will first be searched for the entity. If it is
+// not found there, the localAliasPacker storagepacker.StoragePacker will be
+// used. If an error occurs, an appropriate error message is logged and nil is
+// returned.
+func (i *IdentityStore) FetchEntityForLocalAliasInTxn(txn *memdb.Txn, alias *identity.Alias) *identity.Entity {
+ entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, false)
+ if err != nil {
+ i.logger.Error("failed to fetch entity from local alias", "entity_id", alias.CanonicalID, "error", err)
+ return nil
+ }
+
+ if entity == nil {
+ cachedEntityItem, err := i.localAliasPacker.GetItem(alias.CanonicalID + tmpSuffix)
+ if err != nil {
+ i.logger.Error("failed to fetch cached entity from local alias", "key", alias.CanonicalID+tmpSuffix, "error", err)
+ return nil
+ }
+ if cachedEntityItem != nil {
+ entity, err = i.parseCachedEntity(cachedEntityItem)
+ if err != nil {
+ i.logger.Error("failed to parse cached entity", "key", alias.CanonicalID+tmpSuffix, "error", err)
+ return nil
+ }
+ }
+ }
+
+ return entity
+}
+
func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error {
if entityID == "" {
return nil
diff --git a/vault/logical_system_activity.go b/vault/logical_system_activity.go
index 28d2763b5abc..c286b572d07d 100644
--- a/vault/logical_system_activity.go
+++ b/vault/logical_system_activity.go
@@ -311,7 +311,7 @@ func (b *SystemBackend) handleActivityConfigRead(ctx context.Context, req *logic
return logical.ErrorResponse("no activity log present"), nil
}
- config, err := a.loadConfigOrDefault(ctx, b.Core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
@@ -348,7 +348,7 @@ func (b *SystemBackend) handleActivityConfigUpdate(ctx context.Context, req *log
warnings := make([]string, 0)
- config, err := a.loadConfigOrDefault(ctx, b.Core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
diff --git a/version/VERSION b/version/VERSION
index b8ae5a5b4d68..709c55fe2d7d 100644
--- a/version/VERSION
+++ b/version/VERSION
@@ -1 +1 @@
-1.16.3
\ No newline at end of file
+1.16.4
\ No newline at end of file
diff --git a/website/content/api-docs/auth/aws.mdx b/website/content/api-docs/auth/aws.mdx
index 2a490c150845..b4bb11fa2cb8 100644
--- a/website/content/api-docs/auth/aws.mdx
+++ b/website/content/api-docs/auth/aws.mdx
@@ -203,16 +203,17 @@ This configures the way that Vault interacts with the
### Parameters
- `iam_alias` `(string: "role_id")` - How to generate the identity alias when
- using the `iam` auth method. Valid choices are `role_id`, `unique_id`, and
- `full_arn` When `role_id` is selected, the randomly generated ID of the Vault role
+ using the `iam` auth method. Valid choices are `role_id`, `unique_id`, `canonical_arn` and
+ `full_arn`. When `role_id` is selected, the randomly generated ID of the Vault role
is used. When `unique_id` is selected, the [IAM Unique
ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers#identifiers-unique-ids)
of the IAM principal (either the user or role) is used as the identity alias
- name. When `full_arn` is selected, the ARN returned by the
- `sts:GetCallerIdentity` call is used as the alias name. This is either
+ name. When `canonical_arn` is selected, the role ARN returned by the `sts:GetCallerIdentity` call
+ will be used. This will be `arn:aws:iam:::role/`. When `full_arn` is selected,
+ the ARN returned by the `sts:GetCallerIdentity` call is used as the alias name. This is either
`arn:aws:iam:::user/` or
`arn:aws:sts:::assumed-role//`.
- **Note**: if you select `full_arn` and then delete and recreate the IAM role,
+ **Note**: if you select `canonical_arn` or `full_arn` and then delete and recreate the IAM role,
Vault won't be aware and any identity aliases set up for the role name will
still be valid.
diff --git a/website/content/api-docs/secret/pki.mdx b/website/content/api-docs/secret/pki.mdx
index 767ac796dfac..1329084b17bc 100644
--- a/website/content/api-docs/secret/pki.mdx
+++ b/website/content/api-docs/secret/pki.mdx
@@ -4901,10 +4901,11 @@ $ curl \
```json
{
"data": {
+ "audit_fields": ["common_name", "alt_names", "ip_sans", "uri_sans"],
"authenticators": {
"cert": {
"accessor": "auth_cert_7fe0c1cc",
- "cert_role": ""
+ "cert_role": "est-ca"
},
"userpass": {
"accessor": "auth_userpass_2b333949"
@@ -4912,9 +4913,10 @@ $ curl \
},
"default_mount": true,
"default_path_policy": "sign-verbatim",
+ "enable_sentinel_parsing": true,
"enabled": true,
"label_to_path_policy": {
- "test-label": "roles/est-clients"
+ "test-label": "role:est-clients"
},
"last_updated": "2024-01-31T10:45:22-05:00"
}
@@ -4954,6 +4956,12 @@ updated values as a response along with an updated `last_updated` field.
- `enable_sentinel_parsing` `(bool: false)` - Parse out fields from the provided CSR making them available for
Sentinel policies.
+- `audit_fields` `(list: ["common_name", "alt_names", "ip_sans", "uri_sans"])` - Fields parsed from the CSR that
+ appear in the audit and can be used by sentinel policies. Allowed values are `csr`, `common_name`, `alt_names`,
+ `ip_sans`, `uri_sans`, `other_sans`, `signature_bits`, `exclude_cn_from_sans`, `ou`, `organization`, `country`,
+ `locality`, `province`, `street_address`, `postal_code`, `serial_number`, `use_pss`, `key_type`, `key_bits`,
+ `add_basic_constraints`
+
#### Sample Payload
```json
@@ -4972,7 +4980,9 @@ updated values as a response along with an updated `last_updated` field.
"userpass": {
"accessor": "auth_userpass_b2b08fac"
}
- }
+ },
+ "enable_sentinel_parsing": true,
+ "audit_fields": ["common_name", "alt_names", "ip_sans", "uri_sans"]
}
```
diff --git a/website/content/api-docs/system/internal-counters.mdx b/website/content/api-docs/system/internal-counters.mdx
index 10fce6ce4128..3aae794ffcbe 100644
--- a/website/content/api-docs/system/internal-counters.mdx
+++ b/website/content/api-docs/system/internal-counters.mdx
@@ -1007,7 +1007,7 @@ The `/sys/internal/counters/config` endpoint is used to configure logging of act
- `enabled` `(string: enable, disable, default)` - Enable or disable counting of client activity. When set to `default`, the client
counts are enabled on Enterprise builds and disabled on community builds. Disabling the feature during the middle of a month will
discard any data recorded for that month, but does not delete previous months.
-- `retention_months` `(integer: 48)` - The number of months of history to retain.
+- `retention_months` `(integer: 48)` - The number of months of history to retain. The minimum is 48 months and the maximum is 60 months.
Any missing parameters are left at their existing value.
diff --git a/website/content/docs/concepts/identity.mdx b/website/content/docs/concepts/identity.mdx
index 8ecb9c015a82..20d7e2eb97cf 100644
--- a/website/content/docs/concepts/identity.mdx
+++ b/website/content/docs/concepts/identity.mdx
@@ -105,7 +105,7 @@ a particular auth mount point.
| ------------------- | --------------------------------------------------------------------------------------------------- |
| AliCloud | Principal ID |
| AppRole | Role ID |
-| AWS IAM | Configurable via `iam_alias` to one of: Role ID (default), IAM unique ID, Full ARN |
+| AWS IAM | Configurable via `iam_alias` to one of: Role ID (default), IAM unique ID, Canonical ARN, Full ARN |
| AWS EC2 | Configurable via `ec2_alias` to one of: Role ID (default), EC2 instance ID, AMI ID |
| Azure | Subject (from JWT claim) |
| Cloud Foundry | App ID |
diff --git a/website/content/docs/configuration/listener/tcp.mdx b/website/content/docs/configuration/listener/tcp/index.mdx
similarity index 91%
rename from website/content/docs/configuration/listener/tcp.mdx
rename to website/content/docs/configuration/listener/tcp/index.mdx
index 8cf920199d29..3617a7be7e2f 100644
--- a/website/content/docs/configuration/listener/tcp.mdx
+++ b/website/content/docs/configuration/listener/tcp/index.mdx
@@ -1,7 +1,7 @@
---
layout: docs
page_title: TCP - Listeners - Configuration
-description: |-
+description: >-
The TCP listener configures Vault to listen on the specified TCP address and
port.
---
@@ -47,6 +47,44 @@ also omit keys from the response when the corresponding value is empty (`""`).
settings will apply to CLI and UI output in addition to direct API calls.
+## Default TLS configuration
+
+By default, Vault TCP listeners only accept TLS 1.2 or 1.3 connections and will
+drop connection requests from clients using TLS 1.0 or 1.1.
+
+Vault uses the following ciphersuites by default:
+
+- **TLS 1.3** - `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, or `TLS_CHACHA20_POLY1305_SHA256`.
+- **TLS 1.2** - depends on whether you configure Vault with a RSA or ECDSA certificate.
+
+You can configure Vault with any cipher supported by the
+[`tls`](https://pkg.go.dev/crypto/tls) and
+[`tlsutil`](https://github.com/hashicorp/go-secure-stdlib/blob/main/tlsutil/tlsutil.go#L31-L57)
+Go packages. Vault uses the `tlsutil` package to parse ciphersuite configurations.
+
+
+
+ The Go team and HashiCorp believe that the set of ciphers supported by `tls`
+ and `tlsutil` is appropriate for modern, secure usage. However, some
+ vulnerability scanners may flag issues with your configuration.
+
+ In particular, Sweet32 (CVE-2016-2183) is an attack against 64-bit block size
+ ciphers including 3DES that may allow an attacker to break the encryption of
+ long lived connections. According to the
+ [vulnerability disclosure](https://sweet32.info/), Sweet32 took a
+ single HTTPS session with 785 GB of traffic to break the encryption.
+
+ As of May 2024, the Go team does not believe the risk of Sweet32 is sufficient
+ to remove existing client compatibility by deprecating 3DES support, however,
+ the team did [de-prioritize 3DES](https://github.com/golang/go/issues/45430)
+ in favor of AES-based ciphers.
+
+
+
+Before overriding Vault defaults, we recommend reviewing the recommended Go team
+[approach to TLS configuration](https://go.dev/blog/tls-cipher-suites) with
+particular attention to their ciphersuite selections.
+
## Listener's custom response headers
As of version 1.9, Vault supports defining custom HTTP response headers for the root path (`/`) and also on API endpoints (`/v1/*`).
@@ -119,7 +157,7 @@ default value in the `"/sys/config/ui"` [API endpoint](/vault/api-docs/system/co
request size, in bytes. Defaults to 32 MB if not set or set to `0`.
Specifying a number less than `0` turns off limiting altogether.
-- `max_request_duration` `(string: "90s")` – Specifies the maximum
+- `max_request_duration` `(string: "90s")` – Specifies the maximum
request duration allowed before Vault cancels the request. This overrides
`default_max_request_duration` for this listener.
@@ -283,6 +321,7 @@ This example shows enabling a TLS listener.
```hcl
listener "tcp" {
+ address = "127.0.0.1:8200"
tls_cert_file = "/etc/certs/vault.crt"
tls_key_file = "/etc/certs/vault.key"
}
@@ -546,4 +585,4 @@ Raft Applied Index 219
[golang-tls]: https://golang.org/src/crypto/tls/cipher_suites.go
[api-addr]: /vault/docs/configuration#api_addr
[cluster-addr]: /vault/docs/configuration#cluster_addr
-[go-tls-blog]: https://go.dev/blog/tls-cipher-suites
+[go-tls-blog]: https://go.dev/blog/tls-cipher-suites
\ No newline at end of file
diff --git a/website/content/docs/configuration/listener/tcp/tcp-tls.mdx b/website/content/docs/configuration/listener/tcp/tcp-tls.mdx
new file mode 100644
index 000000000000..90b356bec14e
--- /dev/null
+++ b/website/content/docs/configuration/listener/tcp/tcp-tls.mdx
@@ -0,0 +1,208 @@
+---
+layout: docs
+page_title: Configure TLS for your Vault TCP listener
+description: >-
+ Example TCP listener configuration with TLS encryption.
+---
+
+# Configure TLS for your Vault TCP listener
+
+You can configure your TCP listener to use specific versions of TLS and specific
+ciphersuites.
+
+## Assumptions
+
+- **Your Vault instance is not currently running**. If your Vault cluster is
+ running, you must
+ [restart the cluster gracefully](https://support.hashicorp.com/hc/en-us/articles/17169701076371-A-Step-by-Step-Guide-to-Restarting-a-Vault-Cluster)
+ to apply changes to your TCP listener. SIGHUP will not reload your TLS
+ configuration.
+- **You have a valid TLS certificate file**.
+- **You have a valid TLS key file**.
+- **You have a valid CA file (if required)**.
+
+## Example TLS 1.3 configuration
+
+If a reasonably modern set of clients are connecting to a Vault instance, you
+can configure the `tcp` listener stanza to only accept TLS 1.3 with the
+`tls_min_version` parameter:
+
+
+
+```plaintext
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_cert_file = "cert.pem"
+ tls_key_file = "key.pem"
+ tls_min_version = "tls13"
+}
+```
+
+
+
+Vault does not accept explicit ciphersuite configuration for TLS 1.3 because the
+Go team has already designated a select set of ciphers that align with the
+broadly-accepted Mozilla Security/Server Side TLS guidance for [modern TLS
+configuration](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility).
+
+## Example TLS 1.2 configuration
+
+To use TLS 1.2 with a non-default set of ciphersuites, you can set 1.2 as the
+minimum and maximum allowed TLS version and explicitly define your preferred
+ciphersuites with `tls_cipher_suites` and one or more of the ciphersuite
+constants from the ciphersuite configuration parser. For example:
+
+
+
+```plaintext
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_cert_file = "cert.pem"
+ tls_key_file = "key.pem"
+ tls_min_version = "tls12"
+ tls_max_version = "tls12"
+ tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
+}
+```
+
+
+
+You must set the minimum and maximum TLS version to disable TLS 1.3, which does
+not support explicit cipher selection. The priority order of the ciphersuites
+in `tls_cipher_suites` is determined by the `tls` Go package.
+
+
+
+ The TLS 1.2 configuration example excludes any 3DES ciphers to avoid potential
+ exposure to the Sweet32 attack (CVE-2016-2183). You should customize the
+ ciphersuite list as needed to meet your environment-specific security
+ requirements.
+
+
+
+## Verify your TLS configuration
+
+You can verify your TLS configuration using an SSL scanner such as
+[`sslscan`](https://github.com/rbsec/sslscan).
+
+
+
+
+
+
+```shell-session
+$ sslscan 127.0.0.1:8200
+Version: 2.1.3
+OpenSSL 3.2.1 30 Jan 2024
+
+Connected to 127.0.0.1
+
+Testing SSL server 127.0.0.1 on port 8200 using SNI name 127.0.0.1
+
+ SSL/TLS Protocols:
+SSLv2 disabled
+SSLv3 disabled
+TLSv1.0 disabled
+TLSv1.1 disabled
+TLSv1.2 enabled
+TLSv1.3 enabled
+
+ TLS Fallback SCSV:
+Server supports TLS Fallback SCSV
+
+ TLS renegotiation:
+Session renegotiation not supported
+
+ TLS Compression:
+Compression disabled
+
+ Heartbleed:
+TLSv1.3 not vulnerable to heartbleed
+TLSv1.2 not vulnerable to heartbleed
+
+ Supported Server Cipher(s):
+Preferred TLSv1.3 128 bits TLS_AES_128_GCM_SHA256 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_AES_256_GCM_SHA384 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_CHACHA20_POLY1305_SHA256 Curve 25519 DHE 253
+Preferred TLSv1.2 128 bits ECDHE-ECDSA-AES128-GCM-SHA256 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-AES256-GCM-SHA384 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-CHACHA20-POLY1305 Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits ECDHE-ECDSA-AES128-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-AES256-SHA Curve 25519 DHE 253
+
+ Server Key Exchange Group(s):
+TLSv1.3 128 bits secp256r1 (NIST P-256)
+TLSv1.3 192 bits secp384r1 (NIST P-384)
+TLSv1.3 260 bits secp521r1 (NIST P-521)
+TLSv1.3 128 bits x25519
+TLSv1.2 128 bits secp256r1 (NIST P-256)
+TLSv1.2 192 bits secp384r1 (NIST P-384)
+TLSv1.2 260 bits secp521r1 (NIST P-521)
+TLSv1.2 128 bits x25519
+
+ SSL Certificate:
+Signature Algorithm: ecdsa-with-SHA256
+ECC Curve Name: prime256v1
+ECC Key Strength: 128
+
+Subject: localhost
+Issuer: localhost
+
+Not valid before: May 17 17:27:29 2024 GMT
+Not valid after: Jun 16 17:27:29 2024 GMT
+```
+
+
+
+
+
+
+
+
+```shell-session
+sslscan 127.0.0.1:8200
+Testing SSL server 127.0.0.1 on port 8200 using SNI name 127.0.0.1
+
+ SSL/TLS Protocols:
+SSLv2 disabled
+SSLv3 disabled
+TLSv1.0 disabled
+TLSv1.1 disabled
+TLSv1.2 enabled
+TLSv1.3 enabled
+
+ Supported Server Cipher(s):
+Preferred TLSv1.3 128 bits TLS_AES_128_GCM_SHA256 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_AES_256_GCM_SHA384 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_CHACHA20_POLY1305_SHA256 Curve 25519 DHE 253
+Preferred TLSv1.2 128 bits ECDHE-RSA-AES128-GCM-SHA256 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-AES256-GCM-SHA384 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-CHACHA20-POLY1305 Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits ECDHE-RSA-AES128-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-AES256-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits AES128-GCM-SHA256
+Accepted TLSv1.2 256 bits AES256-GCM-SHA384
+Accepted TLSv1.2 128 bits AES128-SHA
+Accepted TLSv1.2 256 bits AES256-SHA
+Accepted TLSv1.2 112 bits TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+Accepted TLSv1.2 112 bits TLS_RSA_WITH_3DES_EDE_CBC_SHA
+
+ Server Key Exchange Group(s):
+TLSv1.3 128 bits secp256r1 (NIST P-256)
+TLSv1.3 192 bits secp384r1 (NIST P-384)
+TLSv1.3 260 bits secp521r1 (NIST P-521)
+TLSv1.3 128 bits x25519
+TLSv1.2 128 bits secp256r1 (NIST P-256)
+TLSv1.2 192 bits secp384r1 (NIST P-384)
+TLSv1.2 260 bits secp521r1 (NIST P-521)
+TLSv1.2 128 bits x25519
+
+ SSL Certificate:
+Signature Algorithm: sha256WithRSAEncryption
+RSA Key Strength: 4096
+```
+
+
+
+
+
\ No newline at end of file
diff --git a/website/content/docs/configuration/programmatic-management.mdx b/website/content/docs/configuration/programmatic-management.mdx
new file mode 100644
index 000000000000..a04eb8b14c3e
--- /dev/null
+++ b/website/content/docs/configuration/programmatic-management.mdx
@@ -0,0 +1,463 @@
+---
+layout: docs
+page_title: Manage Vault resources programmatically
+description: >-
+ Step-by-step instructions for managing Vault resources programmatically with
+ Terraform
+---
+
+# Manage Vault resources programmatically with Terraform
+
+Use Terraform to manage policies, namespaces, and plugins in Vault.
+
+## Before you start
+
+- **You must have [Terraform installed](/terraform/install)**.
+- **You must have the [Terraform Vault provider](https://registry.terraform.io/providers/hashicorp/vault/latest) configured**.
+- **You must have admin access to your Terraform installation**. If you do not
+ have admin access, you can still generate the relevant configuration files,
+ but you will need to have someone else apply the changes.
+- **You must have a [Vault server running](/vault/tutorials/getting-started/getting-started-dev-server)**.
+
+## Step 1: Create a resource file for namespaces
+
+Terraform Vault provider supports a `vault_namespace` resource type for
+managing Vault namespaces:
+
+```hcl
+resource "vault_namespace" "" {
+ path = ""
+}
+```
+
+To manage your Vault namespaces in Terraform:
+
+1. Use the `vault namespace list` command to identify any unmanaged namespaces
+ that you need to migrate. For example:
+
+ ```shell-session
+ $ vault namespace list
+
+ Keys
+ ----
+ admin/
+ ```
+
+1. Create a new Terraform Vault Provider resource file called
+ `vault_namespaces.tf` that defines `vault_namespace` resources for each of
+ the new or existing namespaces resources you want to manage.
+
+ For example, to migrate the `admin` namespace in the example and create a new
+ `dev` namespace:
+
+ ```hcl
+ resource "vault_namespace" "admin_ns" {
+ path = "admin"
+ }
+
+ resource "vault_namespace" "dev_ns" {
+ path = "dev"
+ }
+ ```
+
+## Step 2: Create a resource file for secret engines
+
+Terraform Vault provider supports discrete types for the different
+[auth](https://registry.terraform.io/providers/hashicorp/vault/latest/docs#vault-authentication-configuration-options),
+[secret](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/mount),
+and [database](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/database_secrets_mount)
+plugin types in Vault.
+
+To migrate a secret engine, use the `vault_mount` resource type:
+
+```hcl
+resource "vault_mount" "" {
+ path = ""
+ type = ""
+}
+```
+
+To manage your Vault secret engines in Terraform:
+
+1. Use the `vault secrets list` command to identify any unmanaged secret engines
+ that you need to migrate. For example:
+
+ ```shell-session
+ $ vault secrets list | grep -vEw '(cubbyhole|identity|sys)'
+
+ Path Type Accessor Description
+ ---- ---- -------- -----------
+ transit/ transit transit_8291b949 n/a
+ ```
+
+1. Use the `-namespace` flag to check for unmanaged secret engines under any
+ namespaces you identified in the previous step. For example, to check for
+ secret engines under the `admin` namespace:
+
+ ```shell-session
+ $ vault secrets list -namespace=admin | grep -vEw '(cubbyhole|identity|sys)'
+
+ Path Type Accessor Description
+ ---- ---- -------- -----------
+ admin_keys/ kv kv_87edfc65 n/a
+ ```
+
+1. Create a new Terraform Vault Provider resource file called `vault_secrets.tf`
+ that defines `vault_mount` resources for each of the new or existing secret
+ engines you want to manage.
+
+ For example, to migrate the `transit` and `admin_keys` secret engines in the
+ example and enable a new `kv` engine under the new `dev` namespace called
+ `dev_keys`:
+
+ ```hcl
+ resource "vault_mount" "transit_plugin" {
+ path = "transit"
+ type = "transit"
+ }
+
+ resource "vault_mount" "admin_keys_plugin" {
+ namespace = vault_namespace.admin_ns.path
+ path = "admin_keys"
+ type = "kv"
+ options = {
+ version = "2"
+ }
+ }
+
+ resource "vault_mount" "dev_keys_plugin" {
+ namespace = vault_namespace.dev_ns.path
+ path = "dev_keys"
+ type = "kv"
+ options = {
+ version = "2"
+ }
+ }
+ ```
+
+## Step 3: Create a resource file for policies
+
+Terraform Vault provider supports a `vault_policy` resource type for
+managing Vault policies:
+
+```hcl
+resource "vault_policy" "" {
+ name = ""
+ policy = <<EOT
+ EOT
+}
+```
+
+To manage your Vault policies in Terraform:
+
+1. Use the `vault policy list` command to identify any unmanaged policies that
+ you need to migrate. For example:
+
+ ```shell-session
+ $ vault policy list | grep -vEw 'root'
+
+ default
+ ```
+
+1. Create a Terraform Vault Provider resource file called `vault_policies.tf`
+ that defines `vault_policy` resources for each policy resource you want to
+ manage in Terraform. You can use the following `bash` code to write all
+ your existing, non-root policies to the file:
+
+ ```shell-session
+ for vpolicy in $(vault policy list | grep -vw root) ; do
+ echo "resource \"vault_policy\" \"vault_$vpolicy\" {"
+ echo " name = \"$vpolicy\""
+ echo " policy = < vault_policies.tf
+ ```
+
+1. Update the `vault_policies.tf` file with any new policies you want to add.
+ For example, to create a policy for the example `dev_keys` secret engine:
+
+ ```hcl
+ resource "vault_policy" "dev_team_policy" {
+ name = "dev_team"
+
+ policy = <
+Tuning or adjusting TTLs does not retroactively affect tokens that were issued. New tokens must be issued after tuning TTLs.
+
+
+**Anti-pattern issue:**
+
+If you create leases without changing the default time-to-live (TTL), leases will live in Vault until the default lease time is up.
+Depending on your infrastructure and available system memory, using the default or long TTL may cause performance issues as Vault stores
+leases in memory.
+
+## Use identity entities for accurate client count
+
+Each Vault client may have multiple accounts with the auth methods enabled on the Vault server.
+
+![Entity](/img/vault-entity-waf1.png)
+
+**Recommended pattern:**
+
+Since each token adds to the client count, and each unique authentication issues a token, you should use identity entities to create aliases that connect each login to a single identity.
+
+ - [Client count](/vault/docs/concepts/client-count)
+ - [Vault identity concepts](/vault/docs/concepts/identity)
+ - [Vault Identity secrets engine](/vault/docs/secrets/identity)
+ - [Identity: Entities and groups tutorial](/vault/tutorials/auth-methods/identity)
+
+**Anti-pattern issue:**
+
+When you do not use identity entities, each new client is counted as a separate identity when using another auth method not linked to the user's entity.
+
+## Increase IOPS
+
+IOPS (input/output operations per second) measures performance for Vault cluster members. Vault is bound by the IO limits of the storage backend rather than the compute requirements.
+
+**Recommended pattern:**
+
+Use the HashiCorp reference guidelines for Vault servers' hardware sizing and network considerations.
+
+- [Vault with Integrated storage reference architecture](/vault/tutorials/day-one-raft/raft-reference-architecture#system-requirements)
+- [Performance tuning](/vault/tutorials/operations/performance-tuning)
+- [Transform secrets engine](/vault/docs/concepts/transform)
+
+
+
+Depending on the client count, the Transform (Enterprise) and Transit secret engines can be resource-intensive.
+
+
+
+**Anti-pattern issue:**
+
+Limited IOPS can significantly degrade Vault’s performance.
+
+## Enable disaster recovery
+
+HashiCorp Vault's (HA) highly available [Integrated storage (Raft)](/vault/docs/concepts/integrated-storage)
+backend provides intra-cluster data replication across cluster members. Integrated Storage provides Vault with
+horizontal scalability and failure tolerance, but it does not provide backup for the entire cluster. Not utilizing
+disaster recovery for your production environment will negatively impact your organization's Recovery Point
+Objective (RPO) and Recovery Time Objective (RTO).
+
+**Recommended pattern:**
+
+For cluster-wide issues (for example, loss of network connectivity), Vault Enterprise Disaster Recovery (DR) replication
+provides a warm standby cluster containing all primary cluster data. The DR cluster does not service reads
+or writes but you can promote it to replace the primary cluster when needed.
+
+- [Disaster recovery replication setup](/vault/tutorials/day-one-raft/disaster-recovery)
+- [Disaster recovery (DR) replication](/vault/docs/enterprise/replication#disaster-recovery-dr-replication)
+- [DR replication API documentation](/vault/api-docs/system/replication/replication-dr)
+
+We also recommend periodically creating data snapshots to protect against data corruption.
+
+- [Vault data backup standard procedure](/vault/tutorials/standard-procedures/sop-backup)
+- [Automated integrated storage snapshots](/vault/docs/enterprise/automated-integrated-storage-snapshots)
+- [/sys/storage/raft/snapshot-auto](/vault/api-docs/system/storage/raftautosnapshots)
+
+**Anti-pattern issue:**
+
+If you do not enable disaster recovery and catastrophic failure occurs, your use cases will encounter longer downtime duration and costs associated with not serving Vault clients in your environment.
+
+## Test disaster recovery
+
+Your disaster recovery (DR) solution is a key part of your overall disaster recovery plan.
+
+Designing and configuring your Vault disaster recovery solution is only the first step. You also need to validate the DR solution, as not doing so can negatively impact your organization's Recovery Point Objective (RPO) and Recovery Time Objective (RTO).
+
+**Recommended pattern:**
+
+Vault's Disaster Recovery (DR) replication mode provides a warm standby for
+failover if the primary cluster experiences catastrophic failure. You should
+periodically test the disaster recovery replication cluster by completing the
+failover and failback procedure.
+
+- [Vault disaster recovery replication failover and failback tutorial](/vault/tutorials/enterprise/disaster-recovery-replication-failover)
+- [Vault Enterprise replication](/vault/docs/enterprise/replication)
+- [Monitoring Vault replication](/vault/tutorials/monitoring/monitor-replication)
+
+You should establish standard operating procedures for restoring a Vault cluster from a snapshot. The restoration methods following a DR situation would be in response to data corruption or sabotage, which Disaster Recovery Replication might be unable to protect against.
+
+- [Standard procedure for restoring a Vault cluster](/vault/tutorials/standard-procedures/sop-restore)
+
+**Anti-pattern issue:**
+
+If you don't test your disaster recovery solution, your key stakeholders will not feel confident they can effectively perform the disaster recovery plan. Testing the DR solution also helps your team to remove uncertainty around recovering the system during an outage.
+
+## Improve upgrade cadence
+
+While it might be easy to upgrade Vault whenever you have capacity, not having a frequent upgrade cadence can impact your Vault performance and security.
+
+**Recommended pattern:**
+
+We recommend upgrading to our latest version of Vault. Subscribe to the releases in [Vault's GitHub repository](https://github.com/hashicorp/vault), and notifications from [HashiCorp Vault discuss](https://discuss.hashicorp.com/c/release-notifications/57), will inform you when we release a new Vault version.
+
+- [Vault upgrade guides](/vault/docs/upgrading)
+- [Vault feature deprecation notice and plans](/vault/docs/deprecation)
+
+**Anti-pattern issue:**
+
+When you do not keep a regular upgrade cadence, your Vault environment could be missing key features or improvements.
+
+- Missing patches for bugs or vulnerabilities as documented in the [CHANGELOG](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md).
+- New features to improve workflow.
+- You must use version-specific documentation rather than the latest documentation.
+- Some educational resources require a specific minimum Vault version.
+- Updates may require a stepped approach that uses an intermediate version before installing the latest binary.
+
+## Test before upgrades
+
+We recommend testing Vault in a sandbox environment before deploying to production.
+
+Although it might be faster to upgrade immediately in production, testing will help identify any compatibility issues.
+
+Be aware of the [CHANGELOG](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md) and account for any new features, improvements, known issues and bug fixes in your testing.
+
+**Recommended pattern:**
+
+Test new Vault versions in sandbox environments before upgrading in production and follow our upgrading documentation.
+
+We recommend adding a testing phase to your standard upgrade procedure.
+
+- [Vault upgrade standard procedure](/vault/tutorials/standard-procedures/sop-upgrade)
+- [Upgrading Vault](/vault/docs/upgrading)
+
+**Anti-pattern issue:**
+
+Without adequate testing before upgrading in production, you risk compatibility and performance issues.
+
+
+
+This could lead to downtime or degradation in your production Vault environment.
+
+
+
+## Rotate audit device logs
+
+Audit devices in Vault maintain a detailed log of every client request and server response.
+
+If you allow the logs for audit devices to run perpetually without rotating you may face a blocked audit device if the filesystem storage becomes exhausted.
+
+**Recommended pattern:**
+
+Inspect and rotate audit logs periodically.
+
+- [Blocked audit devices tutorial](/vault/tutorials/monitoring/blocked-audit-devices)
+- [blocked audit devices](/vault/docs/audit#blocked-audit-devices)
+
+**Anti-pattern issue:**
+
+Vault will stop responding to requests when it cannot record them in at least one enabled audit device.
+
+The audit device can exhaust the local storage if the audit device log is not maintained and rotated over time.
+
+## Monitor metrics
+
+Relying solely on Vault operational logs and data in Vault UI will give you a partial picture of the cluster's performance.
+
+
+**Recommended pattern:**
+
+Continuous monitoring will allow organizations to detect minor problems and promptly resolve them.
+Migrating from reactive to proactive monitoring will help to prevent system failures. Vault has multiple outputs
+that help monitor the cluster's activity: audit logs, operational logs, and telemetry data. This data can work
+with a SIEM (security information and event management) tool for aggregation, inspection, and alerting capabilities.
+
+- [Telemetry](/vault/docs/internals/telemetry#secrets-engines-metric)
+- [Telemetry metrics reference](/vault/tutorials/monitoring/telemetry-metrics-reference)
+
+Adding a monitoring solution:
+- [Audit device logs and incident response with elasticsearch](/vault/tutorials/monitoring/audit-elastic-incident-response)
+- [Monitor telemetry & audit device log data](/vault/tutorials/monitoring/monitor-telemetry-audit-splunk)
+- [Monitor telemetry with Prometheus & Grafana](/vault/tutorials/monitoring/monitor-telemetry-grafana-prometheus)
+
+
+
+
+ Vault logs to standard output and standard error by default, automatically captured by the systemd journal. You can also instruct Vault to redirect operational log writes to a file.
+
+
+
+**Anti-pattern issue:**
+
+Having partial insight into cluster activity can leave the business in a reactive state.
+
+## Establish usage baseline
+
+A baseline provides insight into current utilization and thresholds. Telemetry metrics are valuable, especially when monitored over time. You can use telemetry metrics to gather a baseline of cluster activity, while alerts inform you of abnormal activity.
+
+**Recommended pattern:**
+
+Telemetry information can also be streamed directly from Vault to a range of metrics aggregation solutions and
+saved for aggregation and inspection.
+
+- [Vault usage metrics](/vault/tutorials/monitoring/usage-metrics)
+- [Diagnose server issues](/vault/tutorials/monitoring/diagnose-startup-issues)
+
+**Anti-pattern issue:**
+
+This issue closely relates to the recommended pattern for [monitor metrics](#monitor-metrics).
+ Telemetry data is
+only held in memory for a short period.
+
+## Minimize root token use
+
+Initializing a Vault server emits an initial root token that gives root-level access across all Vault features.
+
+**Recommended pattern:**
+
+We recommend that you revoke the root token after initializing Vault within your environment. If users require elevated access, create access control list policies that grant proper capabilities on the necessary paths in Vault. If your operations require the root token, keep it for the shortest possible time before revoking it.
+
+- [Generate root tokens tutorial](/vault/tutorials/operations/generate-root)
+- [Root tokens](/vault/docs/concepts/tokens#root-tokens)
+- [Vault policies](/vault/docs/concepts/policies)
+
+**Anti-pattern issue:**
+
+A root token can perform all actions within Vault and never expire. Unrestricted access can give users higher privileges than necessary to all Vault operations and paths. Sharing and providing access to root tokens poses a security risk.
+
+## Rekey when necessary
+
+Vault distributes unseal key shares to stakeholders. A quorum of key shares is needed to unseal Vault based on your initialization settings.
+
+**Recommended pattern:**
+
+Vault supports rekeying, and you should establish a workflow for rekeying when necessary.
+
+- [Rekeying & rotating Vault](/vault/tutorials/operations/rekeying-and-rotating)
+- [Operator rekey](/vault/docs/commands/operator/rekey)
+
+**Anti-pattern issue:**
+
+If several stakeholders leave the organization, you risk not having the required key shares to meet the unseal quorum, which could result in the loss of the ability to unseal Vault.
diff --git a/website/content/docs/interoperability-matrix.mdx b/website/content/docs/interoperability-matrix.mdx
index a8cd65099678..aa921fa22a4c 100644
--- a/website/content/docs/interoperability-matrix.mdx
+++ b/website/content/docs/interoperability-matrix.mdx
@@ -85,6 +85,7 @@ Vault Secrets Engine Key: EKM Provider = Vault EK
| Oracle | Oracle 19c | PKCS#11 | 1.11 | N/A |
| Percona | Server 8.0 | KMIP | 1.9 | N/A |
| Percona | XtraBackup 8.0 | KMIP | 1.9 | N/A |
+| Rubrik | CDM 9.1 (Edge) | KMIP | 1.16.2 | N/A |
| Scality | Scality RING | KMIP | 1.12 | N/A |
| Snowflake | Snowflake | KMSE | 1.6 | N/A |
| Veeam | Karsten K10 | Transit | 1.9 | N/A |
diff --git a/website/content/docs/platform/aws/lambda-extension.mdx b/website/content/docs/platform/aws/lambda-extension.mdx
index 3702c96aaa5e..d9fe60028b88 100644
--- a/website/content/docs/platform/aws/lambda-extension.mdx
+++ b/website/content/docs/platform/aws/lambda-extension.mdx
@@ -273,6 +273,13 @@ synchronously refresh its own token before proxying requests if the token is
expired (including a grace window), and it will attempt to renew its token if the
token is nearly expired but renewable.
+
+
+ The Vault Lambda extension does not currently work with
+ [AWS SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html).
+
+
+
## Performance impact
AWS Lambda pricing is based on [number of invocations, time of execution and memory
diff --git a/website/content/docs/platform/k8s/vso/api-reference.mdx b/website/content/docs/platform/k8s/vso/api-reference.mdx
index 57c1f1c18feb..cb2ccf98eb10 100644
--- a/website/content/docs/platform/k8s/vso/api-reference.mdx
+++ b/website/content/docs/platform/k8s/vso/api-reference.mdx
@@ -7,7 +7,7 @@ description: >-
# API Reference
@@ -212,7 +212,7 @@ with a timestamp value of when the trigger was executed.
E.g. vso.secrets.hashicorp.com/restartedAt: "2023-03-23T13:39:31Z"
-Supported resources: Deployment, DaemonSet, StatefulSet
+Supported resources: Deployment, DaemonSet, StatefulSet, argo.Rollout
@@ -224,8 +224,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `kind` _string_ | | | Enum: [Deployment DaemonSet StatefulSet]
|
-| `name` _string_ | | | |
+| `kind` _string_ | Kind of the resource | | Enum: [Deployment DaemonSet StatefulSet argo.Rollout]
|
+| `name` _string_ | Name of the resource | | |
#### SecretTransformation
diff --git a/website/content/docs/platform/k8s/vso/helm.mdx b/website/content/docs/platform/k8s/vso/helm.mdx
index 05d0e219d082..ac29f51ca49b 100644
--- a/website/content/docs/platform/k8s/vso/helm.mdx
+++ b/website/content/docs/platform/k8s/vso/helm.mdx
@@ -11,7 +11,7 @@ The chart is customizable using
[Helm configuration values](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
+ the vault-secrets-operator repo's values.yaml: file commit=bf1746f1937f25b4cb28f15d4b818303f3a78dd9 -->
## Top-Level Stanzas
@@ -34,6 +34,16 @@ Use these links to navigate to a particular top-level stanza.
- `replicas` ((#v-controller-replicas)) (`integer: 1`) - Set the number of replicas for the operator.
+ - `strategy` ((#v-controller-strategy)) (`object: ""`) - Configure update strategy for multi-replica deployments.
+ Kubernetes supports types Recreate, and RollingUpdate
+ ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ Example:
+ strategy: {}
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ type: RollingUpdate
+
- `hostAliases` ((#v-controller-hostaliases)) (`array