diff --git a/changelog/28207.txt b/changelog/28207.txt new file mode 100644 index 000000000000..0c3e07b517e8 --- /dev/null +++ b/changelog/28207.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxy/cache (enterprise): Fixed an issue where Proxy with static secret caching enabled would not correctly handle requests to older secret versions for KVv2 secrets. Proxy's static secret cache now properly handles all requests relating to older versions for KVv2 secrets. +``` diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb.go b/command/agentproxyshared/cache/cachememdb/cache_memdb.go index ed2cd0ac8001..9746374593ec 100644 --- a/command/agentproxyshared/cache/cachememdb/cache_memdb.go +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb.go @@ -240,7 +240,7 @@ func (c *CacheMemDB) SetCapabilitiesIndex(index *CapabilitiesIndex) error { // EvictCapabilitiesIndex removes a capabilities index from the cache based on index name and value. func (c *CacheMemDB) EvictCapabilitiesIndex(indexName string, indexValues ...interface{}) error { index, err := c.GetCapabilitiesIndex(indexName, indexValues...) - if err == ErrCacheItemNotFound { + if errors.Is(err, ErrCacheItemNotFound) { return nil } if err != nil { diff --git a/command/agentproxyshared/cache/cachememdb/index.go b/command/agentproxyshared/cache/cachememdb/index.go index 484409a57954..6297b3df5f2a 100644 --- a/command/agentproxyshared/cache/cachememdb/index.go +++ b/command/agentproxyshared/cache/cachememdb/index.go @@ -53,6 +53,15 @@ type Index struct { // Required: true, Unique: false RequestPath string + // Versions are the versions of the secret for KVv2 static secrets only. This is + // a map of version to response, where version is the version number and response is the + // serialized cached response for that secret version. + // We could have chosen to put index.Response as Versions[0], but opted not to for consistency, + // and also to elevate the fact that the current version/representation of the path being + // cached here is stored there, not here. + // Required: false, Unique: false + Versions map[int][]byte + // Lease is the identifier of the lease in Vault, that belongs to the // response held by this index. // Required: false, Unique: true diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agentproxyshared/cache/lease_cache.go index a9ea65806d5e..ee135e51a047 100644 --- a/command/agentproxyshared/cache/lease_cache.go +++ b/command/agentproxyshared/cache/lease_cache.go @@ -14,6 +14,7 @@ import ( "io" "net/http" "net/url" + "strconv" "strings" "sync" "time" @@ -220,6 +221,7 @@ func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage { // checkCacheForDynamicSecretRequest checks the cache for a particular request based on its // computed ID. It returns a non-nil *SendResponse if an entry is found. func (c *LeaseCache) checkCacheForDynamicSecretRequest(id string) (*SendResponse, error) { + c.logger.Trace("checking cache for dynamic secret request", "id", id) return c.checkCacheForRequest(id, nil) } @@ -229,6 +231,7 @@ func (c *LeaseCache) checkCacheForDynamicSecretRequest(id string) (*SendResponse // cache entry, and return nil if it isn't. It will also evict the cache if this is a non-GET // request. 
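The version handling introduced above can be summarized with a small, self-contained sketch. Nothing below is part of the patch: `versionedLookup` is a hypothetical helper that mirrors the intended dispatch, where a request with no `version` query parameter (or `version=0`) is served from the index's current `Response`, and any other version is looked up in the new `Versions` map, with a `nil` result meaning the request has to be forwarded to Vault:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

// versionedLookup is a hypothetical helper mirroring the cache's dispatch:
// version 0 (or a missing/invalid version parameter) means "current", anything
// else is looked up in the per-version map.
func versionedLookup(req *http.Request, current []byte, versions map[int][]byte) []byte {
	version, err := strconv.Atoi(req.FormValue("version"))
	if err != nil || version == 0 {
		return current // latest version, or a KVv1 secret
	}
	return versions[version] // nil means a cache miss for that version
}

func main() {
	current := []byte("serialized response for version 3")
	versions := map[int][]byte{
		2: []byte("serialized response for version 2"),
		3: current,
	}

	latest := httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret", nil)
	older := httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret?version=2", nil)
	missing := httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret?version=1", nil)

	fmt.Println(string(versionedLookup(latest, current, versions)))  // current response
	fmt.Println(string(versionedLookup(older, current, versions)))   // version 2 response
	fmt.Println(versionedLookup(missing, current, versions) == nil)  // true: forward to Vault
}
```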
func (c *LeaseCache) checkCacheForStaticSecretRequest(id string, req *SendRequest) (*SendResponse, error) { + c.logger.Trace("checking cache for static secret request", "id", id) return c.checkCacheForRequest(id, req) } @@ -269,15 +272,28 @@ func (c *LeaseCache) checkCacheForRequest(id string, req *SendRequest) (*SendRes } } + var response []byte + version := getStaticSecretVersionFromRequest(req) + if version == 0 { + response = index.Response + } else { + response = index.Versions[version] + } + + // We don't have this response as either a current or older version. + if response == nil { + return nil, nil + } + // Cached request is found, deserialize the response - reader := bufio.NewReader(bytes.NewReader(index.Response)) + reader := bufio.NewReader(bytes.NewReader(response)) resp, err := http.ReadResponse(reader, nil) if err != nil { c.logger.Error("failed to deserialize response", "error", err) return nil, err } - sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) + sendResp, err := NewSendResponse(&api.Response{Response: resp}, response) if err != nil { c.logger.Error("failed to create new send response", "error", err) return nil, err @@ -482,8 +498,8 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, // included in the request path. index.RequestPath = getStaticSecretPathFromRequest(req) - c.logger.Trace("attempting to cache static secret with following request path", "request path", index.RequestPath) - err := c.cacheStaticSecret(ctx, req, resp, index) + c.logger.Trace("attempting to cache static secret with following request path", "request path", index.RequestPath, "version", getStaticSecretVersionFromRequest(req)) + err := c.cacheStaticSecret(ctx, req, resp, index, secret) if err != nil { return nil, err } @@ -617,16 +633,19 @@ func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, return resp, nil } -func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, resp *SendResponse, index *cachememdb.Index) error { +func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, resp *SendResponse, index *cachememdb.Index, secret *api.Secret) error { // If a cached version of this secret exists, we now have access, so // we don't need to re-cache, just update index.Tokens indexFromCache, err := c.db.Get(cachememdb.IndexNameID, index.ID) - if err != nil && err != cachememdb.ErrCacheItemNotFound { + if err != nil && !errors.Is(err, cachememdb.ErrCacheItemNotFound) { return err } + version := getStaticSecretVersionFromRequest(req) + // The index already exists, so all we need to do is add our token - // to the index's allowed token list, then re-store it. + // to the index's allowed token list, and if necessary, the new version, + // then re-store it. if indexFromCache != nil { // We must hold a lock for the index while it's being updated. // We keep the two locking mechanisms distinct, so that it's only writes @@ -635,6 +654,45 @@ func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, re defer indexFromCache.IndexLock.Unlock() indexFromCache.Tokens[req.Token] = struct{}{} + // Are we looking for a version that's already cached? 
+ haveVersion := false + if version != 0 { + _, ok := indexFromCache.Versions[version] + if ok { + haveVersion = true + } + } else { + if indexFromCache.Response != nil { + haveVersion = true + } + } + + if !haveVersion { + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + if version == 0 { + indexFromCache.Response = respBytes.Bytes() + // For current KVv2 secrets, see if we can add the version that the secret is + // to the versions map, too. If we got the latest version and the version is #2, + // also update Versions[2] + c.addToVersionListForCurrentVersionKVv2Secret(indexFromCache, secret) + } else { + indexFromCache.Versions[version] = respBytes.Bytes() + } + } + return c.storeStaticSecretIndex(ctx, req, indexFromCache) } @@ -652,8 +710,19 @@ func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, re } resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + // Initialize the versions + index.Versions = map[int][]byte{} + // Set the index's Response - index.Response = respBytes.Bytes() + if version == 0 { + index.Response = respBytes.Bytes() + // For current KVv2 secrets, see if we can add the version that the secret is + // to the versions map, too. If we got the latest version and the version is #2, + // also update Versions[2] + c.addToVersionListForCurrentVersionKVv2Secret(index, secret) + } else { + index.Versions[version] = respBytes.Bytes() + } // Initialize the token map and add this token to it. index.Tokens = map[string]struct{}{req.Token: {}} @@ -661,12 +730,53 @@ func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, re // Set the index type index.Type = cacheboltdb.StaticSecretType + // Store the index: return c.storeStaticSecretIndex(ctx, req, index) } +// addToVersionListForCurrentVersionKVv2Secret takes a secret index and, if it's +// a KVv2 secret, adds the given response to the corresponding version for it. +// This function fails silently, as we could be parsing arbitrary JSON. +// This function can store a version for a KVv1 secret iff: +// - It has 'data' in the path +// - It has a numerical 'metadata.version' field +// However, this risk seems very small, and the negatives of such a secret being +// stored in the cache aren't worth additional mitigations to check if it's a KVv1 +// or KVv2 mount (such as doing a 'preflight' request like the CLI). +// There's no way to access it and it's just a couple of extra bytes, in the +// case that this does happen to a KVv1 secret. +func (c *LeaseCache) addToVersionListForCurrentVersionKVv2Secret(index *cachememdb.Index, secret *api.Secret) { + if secret != nil { + // First do an imperfect but lightweight check. This saves parsing the secret in the case that the secret isn't KVv2. + // KVv2 secrets always contain /data/, but KVv1 secrets can too, so we can't rely on this. 
+ if strings.Contains(index.RequestPath, "/data/") { + metadata, ok := secret.Data["metadata"] + if ok { + metaDataAsMap, ok := metadata.(map[string]interface{}) + if ok { + versionJson, ok := metaDataAsMap["version"].(json.Number) + if ok { + versionInt64, err := versionJson.Int64() + if err == nil { + version := int(versionInt64) + c.logger.Trace("adding response for current KVv2 secret to index's Versions map", "path", index.RequestPath, "version", version) + + if index.Versions == nil { + index.Versions = map[int][]byte{} + } + + index.Versions[version] = index.Response + } + } + } + } + } + } +} + func (c *LeaseCache) storeStaticSecretIndex(ctx context.Context, req *SendRequest, index *cachememdb.Index) error { // Store the index in the cache - c.logger.Debug("storing static secret response into the cache", "method", req.Request.Method, "path", index.RequestPath, "id", index.ID) + c.logger.Debug("storing static secret response into the cache", "path", index.RequestPath, "id", index.ID) err := c.Set(ctx, index) if err != nil { c.logger.Error("failed to cache the proxied response", "error", err) @@ -695,7 +805,7 @@ func (c *LeaseCache) storeStaticSecretIndex(ctx context.Context, req *SendReques return err } - // Lastly, ensure that we start renewing this index, if it's new. + // Lastly, ensure that we start renewing this index, if it's new. // We require the 'created' check so that we don't renew the same // index multiple times. if c.capabilityManager != nil && created { @@ -712,7 +822,7 @@ func (c *LeaseCache) retrieveOrCreateTokenCapabilitiesEntry(token string) (*cach // The index ID is a hash of the token. indexId := hashStaticSecretIndex(token) indexFromCache, err := c.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) - if err != nil && err != cachememdb.ErrCacheItemNotFound { + if err != nil && !errors.Is(err, cachememdb.ErrCacheItemNotFound) { return nil, false, err } @@ -885,6 +995,25 @@ func canonicalizeStaticSecretPath(requestPath string, ns string) string { return path } +// getStaticSecretVersionFromRequest gets the version of a secret +// from a request. For the latest secret and for KVv1 secrets, +// this will return 0. +func getStaticSecretVersionFromRequest(req *SendRequest) int { + if req == nil || req.Request == nil { + return 0 + } + version := req.Request.FormValue("version") + if version == "" { + return 0 + } + versionInt, err := strconv.Atoi(version) + if err != nil { + // It's not a valid version. + return 0 + } + return versionInt +} + // getStaticSecretPathFromRequest gets the canonical path for a // request, taking into account intricacies relating to /v1/ and namespaces // in the header. @@ -923,6 +1052,7 @@ func computeStaticSecretCacheIndex(req *SendRequest) string { if path == "" { return path } + return hashStaticSecretIndex(path) } @@ -964,7 +1094,7 @@ func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { // Default to 500 on error, unless the user provided an invalid type, // which would then be a 400. 
httpStatus := http.StatusInternalServerError - if err == errInvalidType { + if errors.Is(err, errInvalidType) { httpStatus = http.StatusBadRequest } logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) diff --git a/command/agentproxyshared/cache/lease_cache_test.go b/command/agentproxyshared/cache/lease_cache_test.go index 7bb454828659..d88171150631 100644 --- a/command/agentproxyshared/cache/lease_cache_test.go +++ b/command/agentproxyshared/cache/lease_cache_test.go @@ -511,7 +511,7 @@ func TestLeaseCache_StoreCacheableStaticSecret(t *testing.T) { // We expect two entries to be stored by this: // 1. The actual static secret // 2. The capabilities index - err := lc.cacheStaticSecret(context.Background(), request, response, index) + err := lc.cacheStaticSecret(context.Background(), request, response, index, nil) if err != nil { return } @@ -577,7 +577,7 @@ func TestLeaseCache_StaticSecret_CacheClear_All(t *testing.T) { // We expect two entries to be stored by this: // 1. The actual static secret // 2. The capabilities index - err := lc.cacheStaticSecret(context.Background(), request, response, index) + err := lc.cacheStaticSecret(context.Background(), request, response, index, nil) if err != nil { return } diff --git a/command/agentproxyshared/cache/static_secret_cache_updater.go b/command/agentproxyshared/cache/static_secret_cache_updater.go index 3d79f9eff464..0af90f923e24 100644 --- a/command/agentproxyshared/cache/static_secret_cache_updater.go +++ b/command/agentproxyshared/cache/static_secret_cache_updater.go @@ -12,6 +12,7 @@ import ( "io" "net/http" "net/url" + "strconv" "strings" "sync/atomic" "time" @@ -28,7 +29,7 @@ import ( "nhooyr.io/websocket" ) -// Example Event: +// Example write event (this does not contain all possible fields): //{ // "id": "a3be9fb1-b514-519f-5b25-b6f144a8c1ce", // "source": "https://vaultproject.io/", @@ -58,6 +59,37 @@ import ( // "time": "2023-09-12T15:19:49.394915-07:00" //} +// Example event with namespaces for an undelete (this does not contain all possible fields): +// { +// "id": "6c6b13fd-f133-f351-3cf0-b09ae6a417b1", +// "source": "vault://hostname", +// "specversion": "1.0", +// "type": "*", +// "data": { +// "event": { +// "id": "6c6b13fd-f133-f351-3cf0-b09ae6a417b1", +// "metadata": { +// "current_version": "3", +// "destroyed_versions": "[2,3]", +// "modified": "true", +// "oldest_version": "0", +// "operation": "destroy", +// "path": "secret-v2/destroy/my-secret" +// } +// }, +// "event_type": "kv-v2/destroy", +// "plugin_info": { +// "mount_class": "secret", +// "mount_accessor": "kv_b27b3cad", +// "mount_path": "secret-v2/", +// "plugin": "kv", +// "version": "2" +// } +// }, +// "datacontentype": "application/cloudevents", +// "time": "2024-08-27T12:46:01.373097-04:00" +//} + // StaticSecretCacheUpdater is a struct that utilizes // the event system to keep the static secret cache up to date. type StaticSecretCacheUpdater struct { @@ -165,15 +197,47 @@ func (updater *StaticSecretCacheUpdater) streamStaticSecretEvents(ctx context.Co } modified, ok := metadata["modified"].(string) if ok && modified == "true" { + // If data_path were in every event, we'd get that instead, but unfortunately it isn't. 
 			path, ok := metadata["path"].(string)
 			if !ok {
 				return fmt.Errorf("unexpected event format when decoding 'path' element, message: %s\nerror: %w", string(message), err)
 			}
 			namespace, ok := data["namespace"].(string)
 			if ok {
 				path = namespace + path
 			}
-			err := updater.updateStaticSecret(ctx, path)
+
+			deletedOrDestroyedVersions, newPath := checkForDeleteOrDestroyEvent(messageMap)
+			if len(deletedOrDestroyedVersions) > 0 {
+				path = newPath
+				err = updater.handleDeleteDestroyVersions(path, deletedOrDestroyedVersions)
+				if err != nil {
+					// While we are kind of 'missing' an event this way, re-calling this function will
+					// result in the secret remaining up to date.
+					return fmt.Errorf("error handling delete/destroy versions for static secret: path: %q, message: %s error: %w", path, message, err)
+				}
+			}
+
+			// For all other operations, we *only* care about the latest version.
+			// However, if we know the current version, we should update that too.
+			currentVersion := 0
+			currentVersionString, ok := metadata["current_version"].(string)
+			if ok {
+				versionInt, err := strconv.Atoi(currentVersionString)
+				if err != nil {
+					return fmt.Errorf("unexpected event format when decoding 'current_version' element, message: %s\nerror: %w", string(message), err)
+				}
+				currentVersion = versionInt
+			}
+
+			// Note: For delete/destroy events, we continue through to updating the secret itself, too.
+			// This means that if the latest version of the secret gets deleted, the cache still keeps
+			// track of which version is the latest.
+			// One intricacy of e.g. destroy events is that if the latest version is destroyed, the
+			// follow-up update will 404. This is consistent with other behaviour. For Proxy, this means
+			// the secret may be evicted. That's okay.
+
+			err = updater.updateStaticSecret(ctx, path, currentVersion)
 			if err != nil {
 				// While we are kind of 'missing' an event this way, re-calling this function will
 				// result in the secret remaining up to date.
@@ -190,6 +254,97 @@ func (updater *StaticSecretCacheUpdater) streamStaticSecretEvents(ctx context.Co
 	return nil
 }
 
+// checkForDeleteOrDestroyEvent checks an event message for delete/destroy/undelete events and, if there
+// are any, returns the affected versions, as well as the data path of the secret they belong to.
+// If none can be found, it returns an empty slice and an empty string.
+// We have to do this since events do not always return data_path for all events. If they did,
+// we could rely on that instead of doing string manipulation.
+// Example return value: [1, 2, 3], "secrets/data/my-secret".
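Concretely, the path rewrite described above can be sketched as follows; `rewriteEventPath` is an illustrative stand-in (not part of the patch) for the trimming performed by the function below, turning an event path such as `ns/secret-v2/destroy/my-secret` into the cached data path `ns/secret-v2/data/my-secret`:

```go
package main

import (
	"fmt"
	"strings"
)

// rewriteEventPath is an illustrative stand-in for the trimming logic below:
// strip the namespace+mount prefix, drop the delete/destroy/undelete segment,
// and rebuild the path under "data", which is how cached KVv2 secrets are keyed.
func rewriteEventPath(namespace, mountPath, eventPath string) string {
	namespaceMountPathOnly := namespace + mountPath
	pathWithoutMountPath := strings.TrimPrefix(namespace+eventPath, namespaceMountPathOnly)

	trimmedPath := strings.TrimPrefix(pathWithoutMountPath, "delete")
	trimmedPath = strings.TrimPrefix(trimmedPath, "destroy")
	trimmedPath = strings.TrimPrefix(trimmedPath, "undelete")

	return namespaceMountPathOnly + "data" + trimmedPath
}

func main() {
	// "secret-v2/destroy/my-secret" in namespace "ns/" is cached under "ns/secret-v2/data/my-secret".
	fmt.Println(rewriteEventPath("ns/", "secret-v2/", "secret-v2/destroy/my-secret"))
	// Without a namespace, "secret-v2/delete/my-secret" maps to "secret-v2/data/my-secret".
	fmt.Println(rewriteEventPath("", "secret-v2/", "secret-v2/delete/my-secret"))
}
```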
+func checkForDeleteOrDestroyEvent(eventMap map[string]interface{}) ([]int, string) {
+	var versions []int
+
+	data, ok := eventMap["data"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	event, ok := data["event"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	metadata, ok := event["metadata"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	// We should have at most one of these:
+	deletedVersions, ok := metadata["deleted_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(deletedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	destroyedVersions, ok := metadata["destroyed_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(destroyedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	undeletedVersions, ok := metadata["undeleted_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(undeletedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	// We found no deleted, destroyed, or undeleted versions, so return early.
+	if len(versions) == 0 {
+		return versions, ""
+	}
+
+	path, ok := metadata["path"].(string)
+	if !ok {
+		return versions, ""
+	}
+
+	namespace, ok := data["namespace"].(string)
+	if ok {
+		path = namespace + path
+	}
+
+	pluginInfo, ok := data["plugin_info"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	mountPath, ok := pluginInfo["mount_path"].(string)
+	if !ok {
+		return versions, ""
+	}
+
+	// We strip the namespace and mount path prefix for safety, just in case the namespace or
+	// mount path themselves contain 'data' or one of the operation keywords.
+	namespaceMountPathOnly := namespace + mountPath
+	pathWithoutMountPath := strings.TrimPrefix(path, namespaceMountPathOnly)
+
+	// We need to trim the delete/destroy/undelete segment to form the path under which
+	// the secret is stored.
+	trimmedPath := strings.TrimPrefix(pathWithoutMountPath, "delete")
+	trimmedPath = strings.TrimPrefix(trimmedPath, "destroy")
+	trimmedPath = strings.TrimPrefix(trimmedPath, "undelete")
+
+	// This matches how the request paths (and hence IDs) of cached secrets are formed.
+	fixedPath := namespaceMountPathOnly + "data" + trimmedPath
+
+	return versions, fixedPath
+}
+
 // preEventStreamUpdate is called after successful connection to the event system, but before
 // we process any events, to ensure we don't miss any updates.
 // In some cases, this will result in multiple processing of the same updates, but
@@ -208,7 +363,7 @@ func (updater *StaticSecretCacheUpdater) preEventStreamUpdate(ctx context.Contex
 		if index.Type != cacheboltdb.StaticSecretType {
 			continue
 		}
-		err = updater.updateStaticSecret(ctx, index.RequestPath)
+		err = updater.updateStaticSecret(ctx, index.RequestPath, 0)
 		if err != nil {
 			errs = multierror.Append(errs, err)
 		}
@@ -219,9 +374,46 @@
 	return errs.ErrorOrNil()
 }
 
+// handleDeleteDestroyVersions handles delete, destroy, and undelete version events for a given cached
+// secret. The handling is simple: remove the affected versions from the cache. We do the same for
+// undeletes, as they also affect the cache, but we don't re-fetch the secret for undeletes.
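The cache-side effect is plain map surgery; the following minimal sketch (a bare map stands in for the index's `Versions` field, and none of it is part of the patch) shows what handling a destroy of versions 1 and 3 amounts to:

```go
package main

import "fmt"

func main() {
	// Cached per-version responses for a secret (stand-in for index.Versions).
	versions := map[int][]byte{
		1: []byte("v1 response"),
		2: []byte("v2 response"),
		3: []byte("v3 response"),
	}

	// A kv-v2/destroy event for versions 1 and 3 simply drops those entries;
	// the current response (index.Response) is refreshed separately afterwards.
	for _, v := range []int{1, 3} {
		delete(versions, v)
	}

	fmt.Println(len(versions)) // 1: only version 2 remains cached
}
```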
+func (updater *StaticSecretCacheUpdater) handleDeleteDestroyVersions(path string, versions []int) error {
+	indexId := hashStaticSecretIndex(path)
+	updater.logger.Debug("received delete/undelete/destroy versions event", "path", path, "indexId", indexId, "versions", versions)
+
+	index, err := updater.leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+	if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+		// This event doesn't correspond to a secret in our cache,
+		// so this is a no-op.
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// Hold the lock, as we're modifying the secret.
+	index.IndexLock.Lock()
+	defer index.IndexLock.Unlock()
+
+	for _, version := range versions {
+		delete(index.Versions, version)
+	}
+
+	// Lastly, store the updated secret.
+	updater.logger.Debug("storing updated secret as a result of delete/undelete/destroy", "path", path, "versions", versions)
+	err = updater.leaseCache.db.Set(index)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // updateStaticSecret checks for updates for a static secret on the path given,
-// and updates the cache if appropriate
-func (updater *StaticSecretCacheUpdater) updateStaticSecret(ctx context.Context, path string) error {
+// and updates the cache if appropriate. If currentVersion is not 0, we will also
+// update the version at index.Versions[currentVersion] with the same data.
+func (updater *StaticSecretCacheUpdater) updateStaticSecret(ctx context.Context, path string, currentVersion int) error {
 	// We clone the client, as we won't be using the same token.
 	client, err := updater.client.Clone()
 	if err != nil {
@@ -325,9 +517,16 @@ func (updater *StaticSecretCacheUpdater) updateStaticSecret(ctx context.Context,
 	// Set the index's Response
 	index.Response = respBytes.Bytes()
 	index.LastRenewed = time.Now().UTC()
+	if currentVersion != 0 {
+		// It should always be non-nil, but avoid a panic just in case.
+		if index.Versions == nil {
+			index.Versions = map[int][]byte{}
+		}
+		index.Versions[currentVersion] = index.Response
+	}
 
 	// Lastly, store the secret
-	updater.logger.Debug("storing response into the cache due to update", "path", path)
+	updater.logger.Debug("storing response into the cache due to update", "path", path, "currentVersion", currentVersion)
 	err = updater.leaseCache.db.Set(index)
 	if err != nil {
 		return err
diff --git a/command/agentproxyshared/cache/static_secret_cache_updater_test.go b/command/agentproxyshared/cache/static_secret_cache_updater_test.go
index 91158d954c09..69154975ec10 100644
--- a/command/agentproxyshared/cache/static_secret_cache_updater_test.go
+++ b/command/agentproxyshared/cache/static_secret_cache_updater_test.go
@@ -536,9 +536,7 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) {
 	runStreamStaticSecretEvents := func() {
 		wg.Add(1)
 		err := updater.streamStaticSecretEvents(context.Background())
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	}
 
 	go runStreamStaticSecretEvents()
@@ -550,6 +548,7 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) {
 	index := &cachememdb.Index{
 		Namespace:   "root/",
 		RequestPath: path,
+		Versions:    map[int][]byte{},
 		LastRenewed: initialTime,
 		ID:          indexId,
 		// Valid token provided, so update should work.
@@ -557,9 +556,7 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) { Response: []byte{}, } err := leaseCache.db.Set(index) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) secretData := map[string]interface{}{ "foo": "bar", @@ -568,9 +565,7 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) { err = client.Sys().Mount("secret-v2", &api.MountInput{ Type: "kv-v2", }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Wait for the event stream to be fully up and running. Should be faster than this in reality, but // we make it five seconds to protect against CI flakiness. @@ -578,9 +573,7 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) { // Put a secret, which should trigger an event _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Wait for the event to arrive. Events are usually much, much faster // than this, but we make it five seconds to protect against CI flakiness. @@ -588,15 +581,19 @@ func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) { // Then, do a GET to see if the index got updated by the event newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.NotNil(t, newIndex) require.NotEqual(t, []byte{}, newIndex.Response) require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") require.Equal(t, index.RequestPath, newIndex.RequestPath) require.Equal(t, index.Tokens, newIndex.Tokens) + // Assert that the corresponding version got updated too + require.Len(t, newIndex.Versions, 1) + require.NotNil(t, newIndex.Versions) + require.NotNil(t, newIndex.Versions[1]) + require.Equal(t, newIndex.Versions[1], newIndex.Response) + wg.Done() } @@ -622,14 +619,13 @@ func TestUpdateStaticSecret(t *testing.T) { RequestPath: "secret/foo", LastRenewed: initialTime, ID: indexId, + Versions: map[int][]byte{}, // Valid token provided, so update should work. Tokens: map[string]struct{}{client.Token(): {}}, Response: []byte{}, } err := leaseCache.db.Set(index) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) secretData := map[string]interface{}{ "foo": "bar", @@ -637,25 +633,20 @@ func TestUpdateStaticSecret(t *testing.T) { // create the secret in Vault. n.b. 
the test cluster has already mounted the KVv1 backend at "secret" err = client.KVv1("secret").Put(context.Background(), "foo", secretData) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // attempt the update - err = updater.updateStaticSecret(context.Background(), path) - if err != nil { - t.Fatal(err) - } + err = updater.updateStaticSecret(context.Background(), path, 0) + require.NoError(t, err) newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.NotNil(t, newIndex) require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") require.NotEqual(t, []byte{}, newIndex.Response) require.Equal(t, index.RequestPath, newIndex.RequestPath) require.Equal(t, index.Tokens, newIndex.Tokens) + require.Len(t, newIndex.Versions, 0) } // TestUpdateStaticSecret_EvictsIfInvalidTokens tests that updateStaticSecret will @@ -700,7 +691,7 @@ func TestUpdateStaticSecret_EvictsIfInvalidTokens(t *testing.T) { } // attempt the update - err = updater.updateStaticSecret(context.Background(), path) + err = updater.updateStaticSecret(context.Background(), path, 0) if err != nil { t.Fatal(err) } @@ -724,11 +715,14 @@ func TestUpdateStaticSecret_HandlesNonCachedPaths(t *testing.T) { path := "secret/foo" - // attempt the update - err := updater.updateStaticSecret(context.Background(), path) - if err != nil { - t.Fatal(err) - } + // Attempt the update for with currentVersion 0 + err := updater.updateStaticSecret(context.Background(), path, 0) + require.NoError(t, err) + require.Nil(t, err) + + // Attempt a higher currentVersion just to be sure + err = updater.updateStaticSecret(context.Background(), path, 100) + require.NoError(t, err) require.Nil(t, err) } @@ -758,15 +752,14 @@ func TestPreEventStreamUpdate(t *testing.T) { RequestPath: path, LastRenewed: initialTime, ID: indexId, + Versions: map[int][]byte{}, // Valid token provided, so update should work. Tokens: map[string]struct{}{client.Token(): {}}, Response: []byte{}, Type: cacheboltdb.StaticSecretType, } err := leaseCache.db.Set(index) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) secretData := map[string]interface{}{ "foo": "bar", @@ -775,15 +768,11 @@ func TestPreEventStreamUpdate(t *testing.T) { err = client.Sys().Mount("secret-v2", &api.MountInput{ Type: "kv-v2", }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Put a secret (with different values to what's currently in the cache) _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // perform the pre-event stream update: err = updater.preEventStreamUpdate(context.Background()) @@ -797,6 +786,7 @@ func TestPreEventStreamUpdate(t *testing.T) { require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") require.Equal(t, index.RequestPath, newIndex.RequestPath) require.Equal(t, index.Tokens, newIndex.Tokens) + require.Equal(t, index.Versions, newIndex.Versions) } // TestPreEventStreamUpdateErrorUpdating tests that preEventStreamUpdate correctly responds @@ -863,3 +853,189 @@ func TestPreEventStreamUpdateErrorUpdating(t *testing.T) { _, err = leaseCache.db.Get(cachememdb.IndexNameID, indexId) require.Equal(t, cachememdb.ErrCacheItemNotFound, err) } + +// TestCheckForDeleteOrDestroyEvent tests the behaviour of checkForDeleteOrDestroyEvent +// and assures it gives the right responses for different events. 
+func TestCheckForDeleteOrDestroyEvent(t *testing.T) { + t.Parallel() + + expectedVersions := []int{1, 3, 5} + jsonFormatExpectedVersions := "[1,3,5]" + expectedPath := "secret-v2/data/my-secret" + deletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "deleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "delete", + "path": "secret-v2/delete/my-secret", + }, + }, + "event_type": "kv-v2/delete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + undeletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "undeleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "undelete", + "path": "secret-v2/undelete/my-secret", + }, + }, + "event_type": "kv-v2/undelete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + destroyedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "destroyed_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "destroy", + "path": "secret-v2/destroy/my-secret", + }, + }, + "event_type": "kv-v2/destroy", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + actualVersions, actualPath := checkForDeleteOrDestroyEvent(deletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(undeletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(destroyedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) +} + +// TestCheckForDeleteOrDestroyNamespacedEvent tests the behaviour of checkForDeleteOrDestroyEvent +// with namespaces in paths. 
+func TestCheckForDeleteOrDestroyNamespacedEvent(t *testing.T) { + t.Parallel() + + expectedVersions := []int{1, 3, 5} + jsonFormatExpectedVersions := "[1,3,5]" + expectedPath := "ns/secret-v2/data/my-secret" + deletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "deleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "delete", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/delete/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/delete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + undeletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "undeleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "undelete", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/undelete/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/undelete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + destroyedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "destroyed_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "destroy", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/destroy/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/destroy", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + actualVersions, actualPath := checkForDeleteOrDestroyEvent(deletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(undeletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(destroyedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) +}
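The new tests cover event parsing and cache updates, but not the request-side version parsing directly. A possible unit test for that, sketched here rather than taken from the patch (it assumes the package's `SendRequest` type and `getStaticSecretVersionFromRequest` from lease_cache.go, plus imports of `net/http`, `net/http/httptest`, and testify's `require`):

```go
// TestGetStaticSecretVersionFromRequest is a sketch of a unit test for the
// version parsing added in lease_cache.go; it is not part of the patch above.
func TestGetStaticSecretVersionFromRequest(t *testing.T) {
	t.Parallel()

	// No version parameter: 0 means "current version" (or a KVv1 secret).
	req := &SendRequest{Request: httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret", nil)}
	require.Equal(t, 0, getStaticSecretVersionFromRequest(req))

	// An explicit version parameter is returned as-is.
	req = &SendRequest{Request: httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret?version=2", nil)}
	require.Equal(t, 2, getStaticSecretVersionFromRequest(req))

	// Invalid versions fall back to 0 rather than erroring.
	req = &SendRequest{Request: httptest.NewRequest(http.MethodGet, "/v1/secret-v2/data/my-secret?version=two", nil)}
	require.Equal(t, 0, getStaticSecretVersionFromRequest(req))

	// Nil requests are tolerated.
	require.Equal(t, 0, getStaticSecretVersionFromRequest(nil))
}
```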