Delete autoscalers when rolling out new versions (#147)
In the current implementation, when a new version of the app is fully
rolled out, we delete the Kubernetes resources associated with the old
version (except the autoscalers). This is because we're not setting the
app and version labels in the Kubernetes manifest for the
multidimensional pod autoscaler. Setting them there turns out to be
tricky: if we set the labels manually in the template, they are
completely ignored once the template is converted to an unstructured object.

In this PR we add the app and version labels to the Kubernetes manifest
for the multidimensional pod autoscaler, and we delete the autoscaler
resources for old deployments.

Note that the kill command has the same issue; this PR should fix it as
well.
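
The fix therefore has two parts: label the autoscaler object programmatically after its manifest has been parsed into an unstructured object, and delete old autoscalers by label selector when a version is stopped. The sketch below only illustrates the idea; the function names (labelAutoscaler, deleteAutoscalers) and the labels map parameter are placeholders, while the group/version/resource values and the client-go calls match those used in the diff that follows.

package gke

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// GroupVersionResource of the GKE multidimensional pod autoscaler.
var mpaGVR = schema.GroupVersionResource{
	Group:    "autoscaling.gke.io",
	Version:  "v1beta1",
	Resource: "multidimpodautoscalers",
}

// labelAutoscaler (placeholder name) attaches the app and version labels to a
// parsed autoscaler manifest. Labels written directly into the JSON template
// don't survive the conversion to an unstructured object, so they are set
// programmatically instead.
func labelAutoscaler(auto *unstructured.Unstructured, appVersionLabels map[string]string) {
	auto.SetLabels(appVersionLabels)
}

// deleteAutoscalers (placeholder name) deletes every multidimensional pod
// autoscaler in the namespace that carries the given app and version labels,
// i.e. the autoscalers belonging to an old rollout.
func deleteAutoscalers(ctx context.Context, client dynamic.Interface, namespace string, appVersionLabels map[string]string) error {
	selector := labels.SelectorFromSet(labels.Set(appVersionLabels))
	return client.Resource(mpaGVR).Namespace(namespace).DeleteCollection(
		ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: selector.String()},
	)
}

In the actual change, the labeling happens in ensureReplicaSetAutoscaler (internal/gke/gke.go) and the deletion in stop, as shown in the diff below.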
rgrandl authored Jun 4, 2024
1 parent 22efc4f commit 73d0819
Showing 2 changed files with 29 additions and 11 deletions.
33 changes: 28 additions & 5 deletions internal/gke/gke.go
@@ -48,6 +48,7 @@ import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -278,6 +279,21 @@ func stop(ctx context.Context, cluster *ClusterInfo, logger *slog.Logger, app, v
}
}

// Delete any autoscalers.
deleter := cluster.dynamicClient.Resource(schema.GroupVersionResource{
Group: "autoscaling.gke.io",
Version: "v1beta1",
Resource: "multidimpodautoscalers",
}).Namespace(namespaceName)
opts := metav1.DeleteOptions{}
selector = labels.SelectorFromSet(labels.Set{
appKey: name{app}.DNSLabel(),
versionKey: name{version}.DNSLabel()})
listOpts = metav1.ListOptions{LabelSelector: selector.String()}
if err := deleter.DeleteCollection(ctx, opts, listOpts); err != nil {
return err
}

// Stop any jobs.
jobsClient := cluster.Clientset.BatchV1().Jobs(namespaceName)
jobs, err := jobsClient.List(ctx, listOpts)
@@ -439,8 +455,6 @@ func ensureReplicaSetAutoscaler(ctx context.Context, cluster *ClusterInfo, logge
if err := autoJSONTmpl.Execute(&b, struct {
Name string
Namespace string
AppNameLabel string
AppVersionLabel string
AppContainerName string
MinMemory string
MinReplicas int32
@@ -449,8 +463,6 @@ func ensureReplicaSetAutoscaler(ctx context.Context, cluster *ClusterInfo, logge
}{
Name: aName,
Namespace: namespaceName,
AppNameLabel: name{dep.App.Name}.DNSLabel(),
AppVersionLabel: name{dep.Id}.DNSLabel(),
AppContainerName: appContainerName,
MinMemory: memoryUnit.String(),
MinReplicas: cfg.MinReplicas,
@@ -459,7 +471,18 @@ func ensureReplicaSetAutoscaler(ctx context.Context, cluster *ClusterInfo, logge
}); err != nil {
return err
}
return patchMultidimPodAutoscaler(ctx, cluster, patchOptions{logger: logger}, b.String())

// Add labels. Note that this is a bit hacky: if we set the labels in the
// template, they are ignored once the template is converted to an
// unstructured object, so we set them here instead.
var auto unstructured.Unstructured
if err := auto.UnmarshalJSON([]byte(b.String())); err != nil {
return fmt.Errorf("internal error: cannot parse multidimensional pod autoscaler: %v", err)
}
auto.SetLabels(map[string]string{
appKey: name{dep.App.Name}.DNSLabel(),
versionKey: name{dep.Id}.DNSLabel(),
})
return patchMultidimPodAutoscaler(ctx, cluster, patchOptions{logger: logger}, auto)
}

func appContainer(app string, cluster *ClusterInfo, cfg *config.GKEConfig, replicaSet string) (v1.Container, error) {
7 changes: 1 addition & 6 deletions internal/gke/kube_patcher.go
@@ -460,12 +460,7 @@ func patchVerticalPodAutoscaler(ctx context.Context, cluster *ClusterInfo, opts

// patchMultidimPodAutoscaler updates the multidimensional pod autoscaler with
// the new configuration.
func patchMultidimPodAutoscaler(ctx context.Context, cluster *ClusterInfo, opts patchOptions, autoJSON string) error {
var auto unstructured.Unstructured
if err := auto.UnmarshalJSON([]byte(autoJSON)); err != nil {
return fmt.Errorf("internal error: cannot parse multidimensional pod autoscaler: %v", err)

}
func patchMultidimPodAutoscaler(ctx context.Context, cluster *ClusterInfo, opts patchOptions, auto unstructured.Unstructured) error {
auto.SetAPIVersion("autoscaling.gke.io/v1beta1")
cli := cluster.dynamicClient.Resource(schema.GroupVersionResource{
Group: "autoscaling.gke.io",
