Support per-config configTracker disable via ConfigMap/Secret annotation #671

Merged · 1 commit · Aug 19, 2020
18 changes: 11 additions & 7 deletions docs/gitbook/usage/how-it-works.md
@@ -75,16 +75,11 @@ Based on the above configuration, Flagger generates the following Kubernetes objects:
* `deployment/<targetRef.name>-primary`
* `hpa/<autoscalerRef.name>-primary`

The primary deployment is considered the stable release of your app; by default all traffic is routed to this version
and the target deployment is scaled to zero.
Flagger will detect changes to the target deployment (including secrets and configmaps) and will perform a
canary analysis before promoting the new version as primary.
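
For orientation (illustrative, not part of this diff), a minimal Canary resource wiring up the target deployment and the optional autoscaler might look like the sketch below; the field values are assumptions, not taken from this PR:

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment that Flagger shadows with a podinfo-primary copy
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # optional; traffic shifts pause while the HPA scales pods up or down
  autoscalerRef:
    apiVersion: autoscaling/v2beta2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    port: 9898
  analysis:
    interval: 1m
    threshold: 5
    maxWeight: 50
    stepWeight: 10
```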

-If the target deployment uses secrets and/or configmaps, Flagger will create a copy of each object using the `-primary`
-prefix and will reference these objects in the primary deployment. You can disable the secrets/configmaps tracking
-with the `-enable-config-tracking=false` command flag in the Flagger deployment manifest under containers args
-or by setting `--set configTracking.enabled=false` when installing Flagger with Helm.

**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:

```yaml
spec:
  selector:
    matchLabels:
      app: podinfo
```

@@ -102,11 +97,20 @@

-Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
+In addition to `app`, Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention, you can specify your label with
the `-selector-labels=my-app-label` command flag in the Flagger deployment manifest under containers args
or by setting `--set selectorLabels=my-app-label` when installing Flagger with Helm.
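
(Illustrative, not part of this diff.) A Deployment following the `app.kubernetes.io/name` convention, paired with `-selector-labels=app.kubernetes.io/name`, would look roughly like:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: podinfo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: podinfo
    spec:
      containers:
        - name: podinfo
          image: stefanprodan/podinfo # image is illustrative
```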

+If the target deployment uses secrets and/or configmaps, Flagger will create a copy of each object using the `-primary`
+suffix and will reference these objects in the primary deployment. If you annotate your ConfigMap or Secret with
+`flagger.app/config-tracking: disabled`, Flagger will use the same object for the primary deployment instead of making
+a primary copy.
+You can disable the secrets/configmaps tracking globally with the `-enable-config-tracking=false` command flag in
+the Flagger deployment manifest under containers args or by setting `--set configTracking.enabled=false` when
+installing Flagger with Helm, but disabling config-tracking using the per-Secret/ConfigMap annotation may fit your
+use-case better.
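
(Illustrative, not part of the diff.) A ConfigMap that opts out of tracking under this change carries the annotation like so; the object name and data keys are hypothetical:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: podinfo-config
  annotations:
    flagger.app/config-tracking: disabled
data:
  color: blue # sample data
```

The annotation can also be applied in place with `kubectl annotate configmap podinfo-config flagger.app/config-tracking=disabled`; the primary deployment will then reference the original object directly instead of a `-primary` copy.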

The autoscaler reference is optional; when specified, Flagger will pause the traffic increase while the
target and primary deployments are scaled up or down. HPA can help reduce the resource usage during the canary analysis.

22 changes: 21 additions & 1 deletion pkg/canary/config_tracker.go
@@ -5,6 +5,7 @@ import (
"crypto/sha256"
"encoding/json"
"fmt"
"strings"

"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -50,6 +51,15 @@ func checksum(data interface{}) string {
	return fmt.Sprintf("%x", hashBytes[:8])
}

+// configIsDisabled reports whether the object opts out of config tracking
+// via the flagger.app/config-tracking annotation; any value starting with
+// "disable" (e.g. "disable", "disabled") counts.
+func configIsDisabled(annotations map[string]string) bool {
+	for k, v := range annotations {
+		if k == "flagger.app/config-tracking" && strings.HasPrefix(v, "disable") {
+			return true
+		}
+	}
+	return false
+}

// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef
// and computes the checksum of the ConfigMap data
func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) {
@@ -58,6 +68,10 @@
		return nil, fmt.Errorf("configmap %s.%s get query error: %w", name, namespace, err)
	}

+	if configIsDisabled(config.GetAnnotations()) {
+		return nil, nil
+	}
+
	return &ConfigRef{
		Name:     config.Name,
		Type:     ConfigRefMap,
@@ -82,6 +96,10 @@ func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) {
		return nil, nil
	}

+	if configIsDisabled(secret.GetAnnotations()) {
+		return nil, nil
+	}
+
	return &ConfigRef{
		Name:     secret.Name,
		Type:     ConfigRefSecret,
@@ -180,7 +198,9 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) {
			ct.Logger.Errorf("getRefFromConfigMap failed: %v", err)
			continue
		}
-		res[config.GetName()] = *config
+		if config != nil {
+			res[config.GetName()] = *config
+		}
	}
	for secretName := range secretNames {
		secret, err := ct.getRefFromSecret(secretName, cd.Namespace)
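To make the new behavior concrete, here is a self-contained sketch (not part of the diff) of the helper added above. The prefix match accepts both `disable` and `disabled`, and a nil `ConfigRef` from the getters is what lets `GetTargetConfigs` skip untracked objects:

```go
package main

import (
	"fmt"
	"strings"
)

// Same logic as the configIsDisabled helper in the diff: any value of the
// flagger.app/config-tracking annotation that starts with "disable" opts
// the object out of config tracking.
func configIsDisabled(annotations map[string]string) bool {
	for k, v := range annotations {
		if k == "flagger.app/config-tracking" && strings.HasPrefix(v, "disable") {
			return true
		}
	}
	return false
}

func main() {
	for _, v := range []string{"disabled", "disable", "enabled", ""} {
		ann := map[string]string{"flagger.app/config-tracking": v}
		fmt.Printf("%q -> %v\n", v, configIsDisabled(ann))
	}
	// Prints:
	// "disabled" -> true
	// "disable" -> true
	// "enabled" -> false
	// "" -> false
}
```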
114 changes: 111 additions & 3 deletions pkg/canary/config_tracker_test.go
@@ -46,6 +46,33 @@ func TestConfigTracker_ConfigMaps(t *testing.T) {
		if assert.NoError(t, err) {
			assert.Equal(t, configMapProjected.Data["color"], configPrimaryProjected.Data["color"])
		}
+
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-enabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-enabled-primary", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-disabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-disabled-primary", metav1.GetOptions{})
+		assert.Error(t, err)
+
+		var trackedVolPresent, originalVolPresent bool
+		for _, vol := range depPrimary.Spec.Template.Spec.Volumes {
+			if vol.ConfigMap != nil {
+				switch vol.ConfigMap.Name {
+				case "podinfo-config-tracker-enabled":
+					assert.Fail(t, "primary Deployment should not reference the original config-tracked configmap", vol.ConfigMap.Name)
+				case "podinfo-config-tracker-enabled-primary":
+					trackedVolPresent = true
+				case "podinfo-config-tracker-disabled":
+					originalVolPresent = true
+				case "podinfo-config-tracker-disabled-primary":
+					assert.Fail(t, "primary Deployment should not contain a primary copy of an untracked configmap", vol.ConfigMap.Name)
+				}
+			}
+		}
+		assert.True(t, trackedVolPresent, "Volume for primary copy of config-tracked configmap should be present")
+		assert.True(t, originalVolPresent, "Volume for original configmap with disabled tracking should be present")
	})

t.Run("daemonset", func(t *testing.T) {
Expand All @@ -56,10 +83,10 @@ func TestConfigTracker_ConfigMaps(t *testing.T) {
		err := mocks.controller.Initialize(mocks.canary)
		require.NoError(t, err)

-		depPrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
+		daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
		require.NoError(t, err)

-		configPrimaryVolName := depPrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name
+		configPrimaryVolName := daePrimary.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name
		assert.Equal(t, "podinfo-config-vol-primary", configPrimaryVolName)

		configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-env-primary", metav1.GetOptions{})
@@ -77,13 +104,40 @@ func TestConfigTracker_ConfigMaps(t *testing.T) {
			assert.Equal(t, configMap.Data["color"], configPrimaryVol.Data["color"])
		}

-		configProjectedName := depPrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[0].ConfigMap.Name
+		configProjectedName := daePrimary.Spec.Template.Spec.Volumes[2].VolumeSource.Projected.Sources[0].ConfigMap.Name
		assert.Equal(t, "podinfo-config-projected-primary", configProjectedName)

		configPrimaryProjected, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-vol-primary", metav1.GetOptions{})
		if assert.NoError(t, err) {
			assert.Equal(t, configMapProjected.Data["color"], configPrimaryProjected.Data["color"])
		}

+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-enabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-enabled-primary", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-disabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), "podinfo-config-tracker-disabled-primary", metav1.GetOptions{})
+		assert.Error(t, err)
+
+		var trackedVolPresent, originalVolPresent bool
+		for _, vol := range daePrimary.Spec.Template.Spec.Volumes {
+			if vol.ConfigMap != nil {
+				switch vol.ConfigMap.Name {
+				case "podinfo-config-tracker-enabled":
+					assert.Fail(t, "primary DaemonSet should not reference the original config-tracked configmap", vol.ConfigMap.Name)
+				case "podinfo-config-tracker-enabled-primary":
+					trackedVolPresent = true
+				case "podinfo-config-tracker-disabled":
+					originalVolPresent = true
+				case "podinfo-config-tracker-disabled-primary":
+					assert.Fail(t, "primary DaemonSet should not contain a primary copy of an untracked configmap", vol.ConfigMap.Name)
+				}
+			}
+		}
+		assert.True(t, trackedVolPresent, "Volume for primary copy of config-tracked configmap should be present")
+		assert.True(t, originalVolPresent, "Volume for original configmap with disabled tracking should be present")
	})
}

@@ -123,6 +177,33 @@ func TestConfigTracker_Secrets(t *testing.T) {
		if assert.NoError(t, err) {
			assert.Equal(t, string(secretProjected.Data["apiKey"]), string(secretPrimaryProjected.Data["apiKey"]))
		}
+
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-enabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-enabled-primary", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-disabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-disabled-primary", metav1.GetOptions{})
+		assert.Error(t, err)
+
+		var trackedVolPresent, originalVolPresent bool
+		for _, vol := range depPrimary.Spec.Template.Spec.Volumes {
+			if vol.Secret != nil {
+				switch vol.Secret.SecretName {
+				case "podinfo-secret-tracker-enabled":
+					assert.Fail(t, "primary Deployment should not reference the original config-tracked secret", vol.Secret.SecretName)
+				case "podinfo-secret-tracker-enabled-primary":
+					trackedVolPresent = true
+				case "podinfo-secret-tracker-disabled":
+					originalVolPresent = true
+				case "podinfo-secret-tracker-disabled-primary":
+					assert.Fail(t, "primary Deployment should not contain a primary copy of an untracked secret", vol.Secret.SecretName)
+				}
+			}
+		}
+		assert.True(t, trackedVolPresent, "Volume for primary copy of config-tracked secret should be present")
+		assert.True(t, originalVolPresent, "Volume for original secret with disabled tracking should be present")
	})

t.Run("daemonset", func(t *testing.T) {
Expand Down Expand Up @@ -160,5 +241,32 @@ func TestConfigTracker_Secrets(t *testing.T) {
		if assert.NoError(t, err) {
			assert.Equal(t, string(secretProjected.Data["apiKey"]), string(secretPrimaryProjected.Data["apiKey"]))
		}
+
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-enabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-enabled-primary", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-disabled", metav1.GetOptions{})
+		assert.NoError(t, err)
+		_, err = mocks.kubeClient.CoreV1().Secrets("default").Get(context.TODO(), "podinfo-secret-tracker-disabled-primary", metav1.GetOptions{})
+		assert.Error(t, err)
+
+		var trackedVolPresent, originalVolPresent bool
+		for _, vol := range daePrimary.Spec.Template.Spec.Volumes {
+			if vol.Secret != nil {
+				switch vol.Secret.SecretName {
+				case "podinfo-secret-tracker-enabled":
+					assert.Fail(t, "primary DaemonSet should not reference the original config-tracked secret", vol.Secret.SecretName)
+				case "podinfo-secret-tracker-enabled-primary":
+					trackedVolPresent = true
+				case "podinfo-secret-tracker-disabled":
+					originalVolPresent = true
+				case "podinfo-secret-tracker-disabled-primary":
+					assert.Fail(t, "primary DaemonSet should not contain a primary copy of an untracked secret", vol.Secret.SecretName)
+				}
+			}
+		}
+		assert.True(t, trackedVolPresent, "Volume for primary copy of config-tracked secret should be present")
+		assert.True(t, originalVolPresent, "Volume for original secret with disabled tracking should be present")
	})
}