From b112bf05d2a343ee997913ba4f15ce9d72bef249 Mon Sep 17 00:00:00 2001
From: Zach Aller
Date: Mon, 25 Mar 2024 16:35:25 -0500
Subject: [PATCH] Revert "fix(trafficrouting): apply stable selectors on canary service on rollout abort #2781 (#2818)"

This reverts commit 3c5ac368649527940b33a7bc08f3c45f14ec0c77.

Signed-off-by: Zach Aller
---
 rollout/canary_test.go                        | 132 +-----------------
 rollout/service.go                            |   8 --
 rollout/trafficrouting.go                     |   6 +-
 test/e2e/canary_test.go                       |   2 +-
 ...nary-scaledownonabortnotrafficrouting.yaml |  91 ------------
 5 files changed, 13 insertions(+), 226 deletions(-)
 delete mode 100644 test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml

diff --git a/rollout/canary_test.go b/rollout/canary_test.go
index 6b9d550e0f..e26d1ddc5e 100644
--- a/rollout/canary_test.go
+++ b/rollout/canary_test.go
@@ -708,6 +708,7 @@ func TestCanaryRolloutScaleDownOldRsDontScaleDownTooMuch(t *testing.T) {
     assert.Equal(t, int32(0), *updatedRS1.Spec.Replicas)
     updatedRS2 := f.getUpdatedReplicaSet(updatedRS2Index)
     assert.Equal(t, int32(4), *updatedRS2.Spec.Replicas)
+
 }
 
 // TestCanaryDontScaleDownOldRsDuringInterruptedUpdate tests when we need to prevent scale down an
@@ -1068,8 +1069,9 @@ func TestSyncRolloutWaitAddToQueue(t *testing.T) {
     c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute })
     f.runController(key, true, false, c, i, k8sI)
-    // When the controller starts, it will enqueue the rollout while syncing the informer and during the reconciliation step
+    //When the controller starts, it will enqueue the rollout while syncing the informer and during the reconciliation step
     assert.Equal(t, 2, f.enqueuedObjects[key])
+
 }
 
 func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
@@ -1082,7 +1084,7 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
         },
         {
             Pause: &v1alpha1.RolloutPause{
-                Duration: v1alpha1.DurationFromInt(3600), // 1 hour
+                Duration: v1alpha1.DurationFromInt(3600), //1 hour
             },
         },
     }
@@ -1116,8 +1118,9 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
     key := fmt.Sprintf("%s/%s", r2.Namespace, r2.Name)
     c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute })
     f.runController(key, true, false, c, i, k8sI)
-    // When the controller starts, it will enqueue the rollout so we expect the rollout to enqueue at least once.
+    //When the controller starts, it will enqueue the rollout so we expect the rollout to enqueue at least once.
     assert.Equal(t, 1, f.enqueuedObjects[key])
+
 }
 
 func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) {
@@ -1131,8 +1134,7 @@ func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) {
             Pause: &v1alpha1.RolloutPause{
                 Duration: v1alpha1.DurationFromInt(5),
             },
-        },
-        {
+        }, {
             Pause: &v1alpha1.RolloutPause{},
         },
     }
@@ -1284,7 +1286,6 @@ func TestCanarySVCSelectors(t *testing.T) {
             },
         },
     }
-
     rc := rolloutContext{
         log: logutil.WithRollout(rollout),
         reconcilerBase: reconcilerBase{
@@ -1315,7 +1316,6 @@ func TestCanarySVCSelectors(t *testing.T) {
             },
         },
     }
-
     stopchan := make(chan struct{})
     defer close(stopchan)
     informers.Start(stopchan)
@@ -1336,124 +1336,6 @@ func TestCanarySVCSelectors(t *testing.T) {
     }
 }
 
-func TestCanarySVCSelectorsBasicCanaryAbortServiceSwitchBack(t *testing.T) {
-    for _, tc := range []struct {
-        canaryReplicas      int32
-        canaryAvailReplicas int32
-        shouldAbortRollout  bool
-        shouldTargetNewRS   bool
-    }{
-        {2, 2, false, true}, // Rollout, canaryService should point at the canary RS
-        {2, 2, true, false}, // Rollout aborted, canaryService should point at the stable RS
-    } {
-        namespace := "namespace"
-        selectorLabel := "selector-labels-test"
-        selectorNewRSVal := "new-rs-xxx"
-        selectorStableRSVal := "stable-rs-xxx"
-        stableService := &corev1.Service{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "stable",
-                Namespace: namespace,
-                Annotations: map[string]string{v1alpha1.ManagedByRolloutsKey: selectorLabel},
-                Labels: map[string]string{
-                    v1alpha1.DefaultRolloutUniqueLabelKey: selectorStableRSVal,
-                },
-            },
-        }
-        canaryService := &corev1.Service{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "canary",
-                Namespace: namespace,
-                Annotations: map[string]string{v1alpha1.ManagedByRolloutsKey: selectorLabel},
-            },
-        }
-        kubeclient := k8sfake.NewSimpleClientset(stableService, canaryService)
-        informers := k8sinformers.NewSharedInformerFactory(kubeclient, 0)
-        servicesLister := informers.Core().V1().Services().Lister()
-
-        rollout := &v1alpha1.Rollout{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: selectorLabel,
-                Namespace: namespace,
-            },
-            Spec: v1alpha1.RolloutSpec{
-                Strategy: v1alpha1.RolloutStrategy{
-                    Canary: &v1alpha1.CanaryStrategy{
-                        StableService: stableService.Name,
-                        CanaryService: canaryService.Name,
-                    },
-                },
-            },
-        }
-
-        pc := pauseContext{
-            rollout: rollout,
-        }
-        if tc.shouldAbortRollout {
-            pc.AddAbort("Add Abort")
-        }
-
-        rc := rolloutContext{
-            log: logutil.WithRollout(rollout),
-            pauseContext: &pc,
-            reconcilerBase: reconcilerBase{
-                servicesLister: servicesLister,
-                kubeclientset: kubeclient,
-                recorder: record.NewFakeEventRecorder(),
-            },
-            rollout: rollout,
-            newRS: &v1.ReplicaSet{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "canary",
-                    Namespace: namespace,
-                    Labels: map[string]string{
-                        v1alpha1.DefaultRolloutUniqueLabelKey: selectorNewRSVal,
-                    },
-                },
-                Spec: v1.ReplicaSetSpec{
-                    Replicas: pointer.Int32Ptr(tc.canaryReplicas),
-                },
-                Status: v1.ReplicaSetStatus{
-                    AvailableReplicas: tc.canaryAvailReplicas,
-                },
-            },
-            stableRS: &v1.ReplicaSet{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "stable",
-                    Namespace: namespace,
-                    Labels: map[string]string{
-                        v1alpha1.DefaultRolloutUniqueLabelKey: selectorStableRSVal,
-                    },
-                },
-                Spec: v1.ReplicaSetSpec{
-                    Replicas: pointer.Int32Ptr(tc.canaryReplicas),
-                },
-                Status: v1.ReplicaSetStatus{
-                    AvailableReplicas: tc.canaryAvailReplicas,
-                },
-            },
-        }
-
-        stopchan := make(chan struct{})
-        defer close(stopchan)
-        informers.Start(stopchan)
-        informers.WaitForCacheSync(stopchan)
-        err := rc.reconcileStableAndCanaryService()
-        assert.NoError(t, err, "unable to reconcileStableAndCanaryService")
-        updatedCanarySVC, err := servicesLister.Services(rc.rollout.Namespace).Get(canaryService.Name)
-        assert.NoError(t, err, "unable to get updated canary service")
-        if tc.shouldTargetNewRS {
-            assert.Equal(t, selectorNewRSVal, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey],
-                "canary SVC should have newRS selector label when newRS has %d replicas and %d AvailableReplicas",
-                tc.canaryReplicas, tc.canaryAvailReplicas)
-        } else {
-            assert.Equal(t, selectorStableRSVal, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey],
-                "canary SVC should have stableRS selector label when newRS has %d replicas and %d AvailableReplicas",
-                tc.canaryReplicas, tc.canaryAvailReplicas)
-        }
-    }
-}
-
 func TestCanaryRolloutWithInvalidCanaryServiceName(t *testing.T) {
     f := newFixture(t)
     defer f.Close()
diff --git a/rollout/service.go b/rollout/service.go
index 69739b9315..dcbfcf31d9 100644
--- a/rollout/service.go
+++ b/rollout/service.go
@@ -258,14 +258,6 @@ func (c *rolloutContext) reconcileStableAndCanaryService() error {
         return err
     }
 
-    if c.pauseContext != nil && c.pauseContext.IsAborted() && c.rollout.Spec.Strategy.Canary.TrafficRouting == nil {
-        err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.stableRS, true)
-        if err != nil {
-            return err
-        }
-        return nil
-    }
-
     if dynamicallyRollingBackToStable, currSelector := isDynamicallyRollingBackToStable(c.rollout, c.newRS); dynamicallyRollingBackToStable {
         // User may have interrupted an update in order go back to stableRS, and is using dynamic
         // stable scaling. If that is the case, the stableRS might be undersized and if we blindly
diff --git a/rollout/trafficrouting.go b/rollout/trafficrouting.go
index 2a0d12c226..91a74d25a0 100644
--- a/rollout/trafficrouting.go
+++ b/rollout/trafficrouting.go
@@ -145,7 +145,11 @@ func (c *rolloutContext) reconcileTrafficRouting() error {
         c.newStatus.Canary.Weights = nil
         return nil
     }
-
+    if reconcilers == nil {
+        // Not using traffic routing
+        c.newStatus.Canary.Weights = nil
+        return nil
+    }
     c.log.Infof("Found %d TrafficRouting Reconcilers", len(reconcilers))
     // iterate over the list of trafficReconcilers
     for _, reconciler := range reconcilers {
diff --git a/test/e2e/canary_test.go b/test/e2e/canary_test.go
index 4526664d96..d33d1623ea 100644
--- a/test/e2e/canary_test.go
+++ b/test/e2e/canary_test.go
@@ -444,7 +444,7 @@ spec:
           port: 80
         periodSeconds: 30
   strategy:
-    canary:
+    canary: 
     steps:
     - setWeight: 20
     - pause: {}
diff --git a/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml b/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml
deleted file mode 100644
index 0c42735db3..0000000000
--- a/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: canary-scaledowndelay-root
-spec:
-  type: NodePort
-  ports:
-  - port: 80
-    targetPort: http
-    protocol: TCP
-    name: http
-  selector:
-    app: canary-scaledowndelay
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: canary-scaledowndelay-canary
-spec:
-  type: NodePort
-  ports:
-  - port: 80
-    targetPort: http
-    protocol: TCP
-    name: http
-  selector:
-    app: canary-scaledowndelay
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: canary-scaledowndelay-stable
-spec:
-  type: NodePort
-  ports:
-  - port: 80
-    targetPort: http
-    protocol: TCP
-    name: http
-  selector:
-    app: canary-scaledowndelay
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: canary-scaledowndelay-ingress
-  annotations:
-    kubernetes.io/ingress.class: alb
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /*
-        pathType: Prefix
-        backend:
-          service:
-            name: canary-scaledowndelay-root
-            port:
-              name: use-annotation
----
-apiVersion: argoproj.io/v1alpha1
-kind: Rollout
-metadata:
-  name: canary-scaledownd-on-abort
-spec:
-  selector:
-    matchLabels:
-      app: canary-scaledowndelay
-  template:
-    metadata:
-      labels:
-        app: canary-scaledowndelay
-    spec:
-      containers:
-      - name: canary-scaledowndelay
-        image: nginx:1.19-alpine
-        ports:
-        - name: http
-          containerPort: 80
-          protocol: TCP
-        resources:
-          requests:
-            memory: 16Mi
-            cpu: 5m
-  strategy:
-    canary:
-      canaryService: canary-scaledowndelay-canary
-      stableService: canary-scaledowndelay-stable
-      steps:
-      - setWeight: 50
-      - pause: {}
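
Context for the behavior being reverted: the block removed from reconcileStableAndCanaryService in rollout/service.go pointed the canary Service's selector back at the stable ReplicaSet whenever a rollout with no traffic routing was aborted, and the deleted test TestCanarySVCSelectorsBasicCanaryAbortServiceSwitchBack asserted exactly that switch. The standalone Go sketch below illustrates the selector flip under simplified, assumed types; replicaSet, service, ensureSVCTarget, reconcileCanaryService, and the literal "rollouts-pod-template-hash" key are illustrative stand-ins for the real argo-rollouts types, ensureSVCTargets, and v1alpha1.DefaultRolloutUniqueLabelKey, not the project's implementation.

package main

import "fmt"

// replicaSet and service are simplified stand-ins (assumptions, not the real
// argo-rollouts types); they model only the selector label that matters here.
type replicaSet struct {
	podTemplateHash string
}

type service struct {
	selector map[string]string
}

// ensureSVCTarget points the Service's pod-template-hash selector at the given
// ReplicaSet, loosely mirroring what ensureSVCTargets does in rollout/service.go.
func ensureSVCTarget(svc *service, rs *replicaSet) {
	// "rollouts-pod-template-hash" stands in for v1alpha1.DefaultRolloutUniqueLabelKey.
	svc.selector["rollouts-pod-template-hash"] = rs.podTemplateHash
}

// reconcileCanaryService sketches the decision made by the reverted block: when
// the rollout is aborted and no traffic routing is configured, the canary
// Service is switched back to the stable ReplicaSet; otherwise it keeps
// targeting the new (canary) ReplicaSet.
func reconcileCanaryService(canarySvc *service, newRS, stableRS *replicaSet, aborted, usesTrafficRouting bool) {
	if aborted && !usesTrafficRouting {
		ensureSVCTarget(canarySvc, stableRS)
		return
	}
	ensureSVCTarget(canarySvc, newRS)
}

func main() {
	canarySvc := &service{selector: map[string]string{}}
	newRS := &replicaSet{podTemplateHash: "new-rs-xxx"}
	stableRS := &replicaSet{podTemplateHash: "stable-rs-xxx"}

	// Mirrors the two cases of the deleted table test: a running rollout keeps
	// the canary selector, an aborted one is pointed back at stable.
	reconcileCanaryService(canarySvc, newRS, stableRS, false, false)
	fmt.Println("running:", canarySvc.selector) // map[rollouts-pod-template-hash:new-rs-xxx]

	reconcileCanaryService(canarySvc, newRS, stableRS, true, false)
	fmt.Println("aborted:", canarySvc.selector) // map[rollouts-pod-template-hash:stable-rs-xxx]
}

With the revert applied, an aborted canary rollout that uses no traffic routing keeps the new ReplicaSet's selector on the canary Service rather than switching back to stable, which is the scenario the deleted e2e fixture canary-scaledownonabortnotrafficrouting.yaml was added to exercise.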