Automatically scale down Deployment after migrating to Rollout
Signed-off-by: balasoiu <[email protected]>
balasoiu authored and zachaller committed Dec 1, 2023
1 parent 839d727 commit b87e97e
Showing 18 changed files with 1,148 additions and 528 deletions.
7 changes: 7 additions & 0 deletions docs/features/specification.md
@@ -33,6 +33,13 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: rollout-ref-deployment
# Specifies whether the workload (Deployment) is scaled down after migrating to the Rollout.
# The possible options are:
# "never": the Deployment is not scaled down
# "onsuccess": the Deployment is scaled down after the Rollout becomes healthy
# "progressively": the Deployment is scaled down as the Rollout is scaled up;
#   if the Rollout fails, the Deployment is scaled back up
scaleDown: never|onsuccess|progressively

# Template describes the pods that will be created. Same as deployment.
# If used, then do not use Rollout workloadRef property.
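
Put together, a `workloadRef` using the new field might look like the sketch below (names reused from the surrounding example; `onsuccess` is an arbitrary choice):

```yaml
workloadRef:
  apiVersion: apps/v1
  kind: Deployment
  name: rollout-ref-deployment
  # Keep the Deployment running until the Rollout reports healthy
  scaleDown: onsuccess
```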
7 changes: 6 additions & 1 deletion docs/migrating.md
@@ -54,7 +54,11 @@ Instead of removing Deployment you can scale it down to zero and reference it from
1. Create a Rollout resource.
1. Reference an existing Deployment using the `workloadRef` field.
1. Scale down the existing Deployment by changing its `replicas` field to zero.
1. In the `workloadRef` field, set the `scaleDown` attribute, which specifies how the Deployment should be scaled down. Three options are available:
    * "never": the Deployment is not scaled down
    * "onsuccess": the Deployment is scaled down after the Rollout becomes healthy
    * "progressively": the Deployment is scaled down as the Rollout is scaled up
    Alternatively, manually scale down the existing Deployment by changing its `replicas` field to zero (see the command sketch after this list).
1. To perform an update, make the change in the Pod template (`spec.template`) of the Deployment.
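
For the manual route, the `replicas` change can also be applied imperatively; a sketch using the Deployment name from the example below:

```shell
kubectl scale deployment rollout-ref-deployment --replicas=0
```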

Below is an example of a Rollout resource referencing a Deployment.
@@ -73,6 +77,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: rollout-ref-deployment
scaleDown: onsuccess
strategy:
canary:
steps:
2 changes: 2 additions & 0 deletions manifests/crds/rollout-crd.yaml
@@ -3330,6 +3330,8 @@ spec:
type: string
name:
type: string
scaleDown:
type: string
type: object
type: object
status:
2 changes: 2 additions & 0 deletions manifests/install.yaml
@@ -14920,6 +14920,8 @@ spec:
type: string
name:
type: string
scaleDown:
type: string
type: object
type: object
status:
4 changes: 4 additions & 0 deletions pkg/apiclient/rollout/rollout.swagger.json
@@ -1695,6 +1695,10 @@
"name": {
"type": "string",
"title": "Name of the referent"
},
"scaleDown": {
"type": "string",
"title": "Automatically scale down deployment"
}
},
"title": "ObjectRef holds a references to the Kubernetes object"
1,093 changes: 567 additions & 526 deletions pkg/apis/rollouts/v1alpha1/generated.pb.go

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions pkg/apis/rollouts/v1alpha1/generated.proto

Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions pkg/apis/rollouts/v1alpha1/openapi_generated.go

Some generated files are not rendered by default.

8 changes: 8 additions & 0 deletions pkg/apis/rollouts/v1alpha1/types.go
@@ -137,6 +137,8 @@ type ObjectRef struct {
	Kind string `json:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
	// Name of the referent
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// Automatically scale down deployment
	ScaleDown string `json:"scaleDown,omitempty" protobuf:"bytes,4,opt,name=scaleDown"`
}

const (
@@ -1081,3 +1083,9 @@ type RolloutList struct {
type RollbackWindowSpec struct {
	Revisions int32 `json:"revisions,omitempty" protobuf:"varint,1,opt,name=revisions"`
}

const (
	ScaleDownNever         string = "never"
	ScaleDownOnSuccess     string = "onsuccess"
	ScaleDownProgressively string = "progressively"
)
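
The CRD schema shown earlier types `scaleDown` as a free-form string rather than an enum, so a misspelled mode would pass admission silently. A minimal, hypothetical consumer-side guard (`validateScaleDown` is not part of this commit):

```go
package validation

import (
	"fmt"

	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
)

// validateScaleDown accepts only the three documented modes; an empty
// string is allowed and means no automatic scale-down behavior.
func validateScaleDown(v string) error {
	switch v {
	case "", v1alpha1.ScaleDownNever, v1alpha1.ScaleDownOnSuccess, v1alpha1.ScaleDownProgressively:
		return nil
	default:
		return fmt.Errorf("scaleDown: unsupported value %q", v)
	}
}
```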
16 changes: 16 additions & 0 deletions rollout/replicaset.go
@@ -164,6 +164,22 @@ func (c *rolloutContext) reconcileNewReplicaSet() (bool, error) {
}

	scaled, _, err := c.scaleReplicaSetAndRecordEvent(c.newRS, newReplicasCount)

	if err != nil {
		return scaled, err
	}

	revision, _ := replicasetutil.Revision(c.newRS)

	// Revision 1 means this Rollout was just migrated from the referenced
	// Deployment, which is the only time progressive scale-down applies.
	if revision == 1 && c.rollout.Spec.WorkloadRef != nil && c.rollout.Spec.WorkloadRef.ScaleDown == v1alpha1.ScaleDownProgressively {
		oldScale := defaults.GetReplicasOrDefault(c.newRS.Spec.Replicas)
		// Scale the Deployment down as the Rollout gains ready replicas, or
		// scale it back up if the Rollout is being scaled down after a failure.
		if c.rollout.Spec.Replicas != nil && (c.rollout.Status.ReadyReplicas > 0 || oldScale > newReplicasCount) {
			targetScale := *c.rollout.Spec.Replicas - c.rollout.Status.ReadyReplicas
			err = c.scaleDeployment(&targetScale)
		}
	}

	return scaled, err
}

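The progressive branch keeps total capacity roughly constant: the Deployment retains whatever the Rollout has not yet made ready, and `scaleDeployment` clamps negative targets to zero. A standalone sketch of that arithmetic (hypothetical helper, not the controller code):

```go
package main

import "fmt"

// progressiveTarget mirrors targetScale above: the Deployment keeps the
// capacity the Rollout has not yet made ready, clamped to zero just as
// scaleDeployment does.
func progressiveTarget(rolloutReplicas, readyReplicas int32) int32 {
	if target := rolloutReplicas - readyReplicas; target > 0 {
		return target
	}
	return 0
}

func main() {
	// With spec.replicas = 5: as readiness grows 0 -> 3 -> 5, the
	// Deployment shrinks 5 -> 2 -> 0; if readiness drops, it grows back.
	for _, ready := range []int32{0, 3, 5} {
		fmt.Printf("ready=%d -> deployment replicas=%d\n", ready, progressiveTarget(5, ready))
	}
}
```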
68 changes: 68 additions & 0 deletions rollout/replicaset_test.go
@@ -1,6 +1,7 @@
package rollout

import (
"context"
"strconv"
"testing"
"time"
@@ -535,3 +536,70 @@ func TestIsReplicaSetReferenced(t *testing.T) {
		})
	}
}

func TestScaleDownProgressively(t *testing.T) {
	tests := []struct {
		name                       string
		deploymentReplicas         int32
		newRSReplicas              int
		newRSRevision              string
		rolloutReplicas            int32
		rolloutReadyReplicas       int32
		abortScaleDownDelaySeconds int32
		expectedDeploymentReplicas int32
	}{
		{
			// Revision 1 with 3/5 rollout replicas ready: the Deployment
			// should shrink to 5 - 3 = 2.
			name:                       "Scale down deployment",
			deploymentReplicas:         5,
			newRSReplicas:              5,
			newRSRevision:              "1",
			rolloutReplicas:            5,
			rolloutReadyReplicas:       3,
			abortScaleDownDelaySeconds: 0,
			expectedDeploymentReplicas: 2,
		},
		{
			// Only 1/5 ready: the Deployment grows back from 0 to 5 - 1 = 4.
			name:                       "Scale up deployment",
			deploymentReplicas:         0,
			newRSReplicas:              5,
			newRSRevision:              "1",
			rolloutReplicas:            5,
			rolloutReadyReplicas:       1,
			abortScaleDownDelaySeconds: 0,
			expectedDeploymentReplicas: 4,
		},
		{
			// Revision 2 is past the initial migration, so the Deployment
			// is left untouched.
			name:                       "Do not scale deployment",
			deploymentReplicas:         5,
			newRSReplicas:              5,
			newRSRevision:              "2",
			rolloutReplicas:            5,
			rolloutReadyReplicas:       3,
			abortScaleDownDelaySeconds: 0,
			expectedDeploymentReplicas: 5,
		},
	}

	for _, test := range tests {
		ctx := createScaleDownRolloutContext(v1alpha1.ScaleDownProgressively, test.deploymentReplicas, true, nil)
		ctx.rollout.Spec.Strategy = v1alpha1.RolloutStrategy{
			BlueGreen: &v1alpha1.BlueGreenStrategy{
				AbortScaleDownDelaySeconds: &test.abortScaleDownDelaySeconds,
			},
		}
		ctx.newRS = rs("foo-v2", test.newRSReplicas, nil, noTimestamp, nil)
		ctx.newRS.ObjectMeta.Annotations[annotations.RevisionAnnotation] = test.newRSRevision
		ctx.pauseContext.removeAbort = true
		ctx.rollout.Spec.Replicas = &test.rolloutReplicas
		ctx.rollout.Status.ReadyReplicas = test.rolloutReadyReplicas

		_, err := ctx.reconcileNewReplicaSet()
		assert.Nil(t, err)
		k8sfakeClient := ctx.kubeclientset.(*k8sfake.Clientset)
		updatedDeployment, err := k8sfakeClient.AppsV1().Deployments("default").Get(context.TODO(), "workload-test", metav1.GetOptions{})
		assert.Nil(t, err)
		assert.Equal(t, test.expectedDeploymentReplicas, *updatedDeployment.Spec.Replicas)
	}
}
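
Assuming a checkout of the repository, the new cases can be run in isolation with the standard tooling:

```shell
go test ./rollout -run TestScaleDownProgressively
```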
36 changes: 36 additions & 0 deletions rollout/scale_utils.go
@@ -0,0 +1,36 @@
package rollout

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scaleDeployment sets the replica count of the Deployment referenced by the
// Rollout's workloadRef, clamping negative targets to zero and skipping the
// update when the Deployment is already at the desired scale.
func (c *rolloutContext) scaleDeployment(targetScale *int32) error {
	deploymentName := c.rollout.Spec.WorkloadRef.Name
	namespace := c.rollout.Namespace
	deployment, err := c.kubeclientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
	if err != nil {
		c.log.Warnf("Failed to fetch deployment %s: %s", deploymentName, err.Error())
		return err
	}

	var newReplicasCount int32
	if *targetScale < 0 {
		newReplicasCount = 0
	} else {
		newReplicasCount = *targetScale
	}
	if newReplicasCount == *deployment.Spec.Replicas {
		return nil
	}
	c.log.Infof("Scaling deployment %s to %d replicas", deploymentName, newReplicasCount)
	*deployment.Spec.Replicas = newReplicasCount

	_, err = c.kubeclientset.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
	if err != nil {
		c.log.Warnf("Failed to update deployment %s: %s", deploymentName, err.Error())
		return err
	}
	return nil
}