From e9000542b32ef4dc172c910c328a0df23a44cdcf Mon Sep 17 00:00:00 2001
From: Arata Furukawa
Date: Tue, 14 May 2024 22:49:20 +0000
Subject: [PATCH] fix: copy visited map #11699 (#12667)

This commit fixes issue #11699, which caused a circular-dependency
warning to be logged even when no cycle existed.

Fix the false cycle detection by copying the visited resource map
before each recursive call to getAppRecursive.

Fixes #11699

Signed-off-by: Arata Furukawa
Co-authored-by: Blake Pettersson
---
 controller/cache/cache.go      |  21 ++--
 controller/cache/cache_test.go | 211 +++++++++++++++++++++++++++++++++
 2 files changed, 224 insertions(+), 8 deletions(-)

diff --git a/controller/cache/cache.go b/controller/cache/cache.go
index 295d867f1df5f..df58a5955c5f4 100644
--- a/controller/cache/cache.go
+++ b/controller/cache/cache.go
@@ -290,7 +290,8 @@ func isRootAppNode(r *clustercache.Resource) bool {
 }
 
 func getApp(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource) string {
-	return getAppRecursive(r, ns, map[kube.ResourceKey]bool{})
+	name, _ := getAppRecursive(r, ns, map[kube.ResourceKey]bool{})
+	return name
 }
 
 func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
@@ -301,27 +302,31 @@ func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
 	return gv
 }
 
-func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource, visited map[kube.ResourceKey]bool) string {
+func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource, visited map[kube.ResourceKey]bool) (string, bool) {
 	if !visited[r.ResourceKey()] {
 		visited[r.ResourceKey()] = true
 	} else {
 		log.Warnf("Circular dependency detected: %v.", visited)
-		return resInfo(r).AppName
+		return resInfo(r).AppName, false
 	}
 
 	if resInfo(r).AppName != "" {
-		return resInfo(r).AppName
+		return resInfo(r).AppName, true
 	}
 	for _, ownerRef := range r.OwnerRefs {
 		gv := ownerRefGV(ownerRef)
 		if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name)]; ok {
-			app := getAppRecursive(parent, ns, visited)
-			if app != "" {
-				return app
+			visited_branch := make(map[kube.ResourceKey]bool, len(visited))
+			for k, v := range visited {
+				visited_branch[k] = v
+			}
+			app, ok := getAppRecursive(parent, ns, visited_branch)
+			if app != "" || !ok {
+				return app, ok
 			}
 		}
 	}
-	return ""
+	return "", true
 }
 
 var (
diff --git a/controller/cache/cache_test.go b/controller/cache/cache_test.go
index 53a03ca81995e..584f311f2ee30 100644
--- a/controller/cache/cache_test.go
+++ b/controller/cache/cache_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/argoproj/gitops-engine/pkg/cache"
 	"github.com/argoproj/gitops-engine/pkg/cache/mocks"
 	"github.com/argoproj/gitops-engine/pkg/health"
+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"github.com/stretchr/testify/mock"
 	"k8s.io/client-go/kubernetes/fake"
 
@@ -319,6 +320,216 @@ func Test_asResourceNode_owner_refs(t *testing.T) {
 	assert.Equal(t, expected, resNode)
 }
 
+func Test_getAppRecursive(t *testing.T) {
+	for _, tt := range []struct {
+		name     string
+		r        *cache.Resource
+		ns       map[kube.ResourceKey]*cache.Resource
+		wantName string
+		wantOK   assert.BoolAssertionFunc
+	}{
+		{
+			name: "ok: cm1->app1",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "app1"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "app1"): {
+					Info: &ResourceInfo{
+						AppName: "app1",
+					},
+				},
+			},
+			wantName: "app1",
+			wantOK:   assert.True,
+		},
+		{
+			name: "ok: cm1->cm2->app1",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "cm2"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "cm2"): {
+					Ref: v1.ObjectReference{
+						Name: "cm2",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "app1"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "app1"): {
+					Info: &ResourceInfo{
+						AppName: "app1",
+					},
+				},
+			},
+			wantName: "app1",
+			wantOK:   assert.True,
+		},
+		{
+			name: "cm1->cm2->app1 & cm1->cm3->app1",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "cm2"},
+					{Name: "cm3"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "cm2"): {
+					Ref: v1.ObjectReference{
+						Name: "cm2",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "app1"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "cm3"): {
+					Ref: v1.ObjectReference{
+						Name: "cm3",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "app1"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "app1"): {
+					Info: &ResourceInfo{
+						AppName: "app1",
+					},
+				},
+			},
+			wantName: "app1",
+			wantOK:   assert.True,
+		},
+		{
+			// Not a cycle: cm2 is merely reachable from two sibling owner branches.
+			// Issue #11699, fixed by #12667.
+			name: "ok: cm1->cm2 & cm1->cm3->cm2 & cm1->cm3->app1",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "cm2"},
+					{Name: "cm3"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "cm2"): {
+					Ref: v1.ObjectReference{
+						Name: "cm2",
+					},
+				},
+				kube.NewResourceKey("", "", "", "cm3"): {
+					Ref: v1.ObjectReference{
+						Name: "cm3",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm2"},
+						{Name: "app1"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "app1"): {
+					Info: &ResourceInfo{
+						AppName: "app1",
+					},
+				},
+			},
+			wantName: "app1",
+			wantOK:   assert.True,
+		},
+		{
+			name: "cycle: cm1<->cm2",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "cm2"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "cm1"): {
+					Ref: v1.ObjectReference{
+						Name: "cm1",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm2"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "cm2"): {
+					Ref: v1.ObjectReference{
+						Name: "cm2",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm1"},
+					},
+				},
+			},
+			wantName: "",
+			wantOK:   assert.False,
+		},
+		{
+			name: "cycle: cm1->cm2->cm3->cm1",
+			r: &cache.Resource{
+				Ref: v1.ObjectReference{
+					Name: "cm1",
+				},
+				OwnerRefs: []metav1.OwnerReference{
+					{Name: "cm2"},
+				},
+			},
+			ns: map[kube.ResourceKey]*cache.Resource{
+				kube.NewResourceKey("", "", "", "cm1"): {
+					Ref: v1.ObjectReference{
+						Name: "cm1",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm2"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "cm2"): {
+					Ref: v1.ObjectReference{
+						Name: "cm2",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm3"},
+					},
+				},
+				kube.NewResourceKey("", "", "", "cm3"): {
+					Ref: v1.ObjectReference{
+						Name: "cm3",
+					},
+					OwnerRefs: []metav1.OwnerReference{
+						{Name: "cm1"},
+					},
+				},
+			},
+			wantName: "",
+			wantOK:   assert.False,
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			visited := map[kube.ResourceKey]bool{}
+			got, ok := getAppRecursive(tt.r, tt.ns, visited)
+			assert.Equal(t, tt.wantName, got)
+			tt.wantOK(t, ok)
+		})
+	}
+}
+
 func TestSkipResourceUpdate(t *testing.T) {
 	var (
 		hash1_x string = "x"
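
Reviewer note: the fix works because each owner-reference branch now explores the
graph with its own copy of the visited map, so a diamond-shaped ownership graph
(the same ancestor reached through two sibling owners) is no longer mistaken for a
cycle. Below is a minimal, self-contained Go sketch of that idea; the node type,
the findApp helper, and the example graph are illustrative stand-ins and not part
of the Argo CD codebase.

// sketch.go: per-branch copy of the visited set in a depth-first owner walk.
package main

import "fmt"

type node struct {
	name   string
	app    string   // non-empty if this node is an application
	owners []string // names of owner nodes
}

// findApp walks owner references depth-first. Each recursive call receives its
// own copy of visited, so reaching the same node from two sibling branches
// (a diamond, not a cycle) is not reported as a circular dependency.
func findApp(n *node, all map[string]*node, visited map[string]bool) (string, bool) {
	if visited[n.name] {
		return "", false // true cycle: the current path revisits n
	}
	visited[n.name] = true
	if n.app != "" {
		return n.app, true
	}
	for _, owner := range n.owners {
		parent, ok := all[owner]
		if !ok {
			continue
		}
		// Copy before recursing so sibling branches stay independent.
		branch := make(map[string]bool, len(visited))
		for k, v := range visited {
			branch[k] = v
		}
		if app, ok := findApp(parent, all, branch); app != "" || !ok {
			return app, ok
		}
	}
	return "", true
}

func main() {
	// Diamond from issue #11699: cm1->cm2 and cm1->cm3->cm2, with cm3->app1.
	all := map[string]*node{
		"cm2":  {name: "cm2"},
		"cm3":  {name: "cm3", owners: []string{"cm2", "app1"}},
		"app1": {name: "app1", app: "app1"},
	}
	cm1 := &node{name: "cm1", owners: []string{"cm2", "cm3"}}
	app, ok := findApp(cm1, all, map[string]bool{})
	fmt.Println(app, ok) // app1 true: no false cycle warning
}

With a single shared map, the second branch cm1->cm3->cm2 would find cm2 already
marked by the first branch and report a cycle; the per-branch copy costs a few
extra small allocations per owner reference, which is negligible for typical
owner chains.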