feat: Support AnalysisRunMetadata and Dryrun for experiments via Rollout
Signed-off-by: Guillaume Doussin <[email protected]>
OpenGuidou committed Dec 5, 2023
1 parent c244c04 commit a18c8e9
Showing 20 changed files with 1,132 additions and 578 deletions.
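For illustration, a Rollout canary step exercising the two new fields might look like the sketch below. The names here (guestbook, baseline, success-rate, the team label) are placeholders for illustration only and are not taken from this commit.

apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: guestbook
spec:
  strategy:
    canary:
      steps:
      - experiment:
          templates:
          - name: baseline
            specRef: stable
          analyses:
          - name: success-rate
            templateName: success-rate
          dryRun:                      # new in this commit: evaluate the metric without affecting the verdict
          - metricName: success-rate
          analysisRunMetadata:         # new in this commit: extra metadata propagated to the AnalysisRun
            labels:
              team: example-team
            annotations:
              purpose: dry-run-experiment

With a step like this, the AnalysisRun created for the experiment is intended to carry the extra labels and annotations and to evaluate success-rate in dry-run mode, per the controller changes in experiments/experiment.go below.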
57 changes: 57 additions & 0 deletions experiments/analysisrun_test.go
@@ -734,3 +734,60 @@ func TestTerminateAnalysisRuns(t *testing.T) {
patchedAr := f.getPatchedAnalysisRunAsObj(arPatchIdx)
assert.True(t, patchedAr.Spec.Terminate)
}

// TestCreateAnalysisRunWithMetadataAndDryRun ensures we create the AnalysisRun with the appropriate labels, annotations, and dry-run options when provided in the experiment
func TestCreateAnalysisRunWithMetadataAndDryRun(t *testing.T) {
templates := generateTemplates("bar")
aTemplates := generateAnalysisTemplates("success-rate")
e := newExperiment("foo", templates, "")
e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{
{
Name: "success-rate",
TemplateName: aTemplates[0].Name,
},
}
e.Status.Phase = v1alpha1.AnalysisPhaseRunning
e.Status.AvailableAt = now()
e.Spec.AnalysisRunMetadata = v1alpha1.AnalysisRunMetadata{
Labels: map[string]string{
"foo": "bar",
"foo2": "bar2",
},
Annotations: map[string]string{
"bar": "foo",
"bar2": "foo2",
},
}
e.Spec.DryRun = []v1alpha1.DryRun{
{
MetricName: "someMetric",
},
{
MetricName: "someOtherMetric",
},
}
rs := templateToRS(e, templates[0], 1)
ar := analysisTemplateToRun("success-rate", e, &aTemplates[0].Spec)

f := newFixture(t, e, rs, &aTemplates[0])
defer f.Close()

analysisRunIdx := f.expectCreateAnalysisRunAction(ar)
patchIdx := f.expectPatchExperimentAction(e)
f.run(getKey(e, t))

patchedEx := f.getPatchedExperimentAsObj(patchIdx)
assert.Equal(t, v1alpha1.AnalysisPhasePending, patchedEx.Status.AnalysisRuns[0].Phase)

analysisRun := f.getCreatedAnalysisRun(analysisRunIdx)
assert.Len(t, analysisRun.ObjectMeta.Labels, 2)
assert.Equal(t, analysisRun.ObjectMeta.Labels["foo"], "bar")
assert.Equal(t, analysisRun.ObjectMeta.Labels["foo2"], "bar2")
assert.Len(t, analysisRun.ObjectMeta.Annotations, 2)
assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar"], "foo")
assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar2"], "foo2")

assert.Len(t, analysisRun.Spec.DryRun, 2)
assert.Equal(t, analysisRun.Spec.DryRun[0].MetricName, "someMetric")
assert.Equal(t, analysisRun.Spec.DryRun[1].MetricName, "someOtherMetric")
}
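Rendered as a manifest, the AnalysisRun this test expects the controller to create would carry metadata roughly like the following (an illustrative rendering, presumably named foo-success-rate from the experiment and analysis names; the metrics resolved from the template are omitted):

apiVersion: argoproj.io/v1alpha1
kind: AnalysisRun
metadata:
  name: foo-success-rate
  labels:
    foo: bar
    foo2: bar2
  annotations:
    bar: foo
    bar2: foo2
spec:
  dryRun:
  - metricName: someMetric
  - metricName: someOtherMetric
  # metrics and args resolved from the success-rate template omitted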
56 changes: 40 additions & 16 deletions experiments/experiment.go
@@ -101,7 +101,7 @@ func (ec *experimentContext) reconcile() *v1alpha1.ExperimentStatus {
}

for _, analysis := range ec.ex.Spec.Analyses {
ec.reconcileAnalysisRun(analysis, ec.ex.Spec.DryRun, ec.ex.Spec.MeasurementRetention)
ec.reconcileAnalysisRun(analysis, ec.ex.Spec.DryRun, ec.ex.Spec.MeasurementRetention, &ec.ex.Spec.AnalysisRunMetadata)
}

newStatus := ec.calculateStatus()
@@ -390,7 +390,7 @@ func calculateEnqueueDuration(ex *v1alpha1.Experiment, newStatus *v1alpha1.Exper

// reconcileAnalysisRun reconciles a single analysis run, creating or terminating it as necessary.
// Updates the analysis run statuses, which may subsequently fail the experiment.
func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) {
func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) {
logCtx := ec.log.WithField("analysis", analysis.Name)
logCtx.Infof("Reconciling analysis")
prevStatus := experimentutil.GetAnalysisRunStatus(ec.ex.Status, analysis.Name)
@@ -446,7 +446,7 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn
logCtx.Warnf("Skipping AnalysisRun creation for analysis %s: experiment is terminating", analysis.Name)
return
}
run, err := ec.createAnalysisRun(analysis, dryRunMetrics, measurementRetentionMetrics)
run, err := ec.createAnalysisRun(analysis, dryRunMetrics, measurementRetentionMetrics, analysisRunMetadata)
if err != nil {
msg := fmt.Sprintf("Failed to create AnalysisRun for analysis '%s': %v", analysis.Name, err.Error())
newStatus.Phase = v1alpha1.AnalysisPhaseError
@@ -493,13 +493,13 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn
// createAnalysisRun creates the analysis run. If an existing runs exists with same name, is
// semantically equal, and is not complete, returns the existing one, otherwise creates a new
// run with a collision counter increase.
func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) {
func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) (*v1alpha1.AnalysisRun, error) {
analysisRunIf := ec.argoProjClientset.ArgoprojV1alpha1().AnalysisRuns(ec.ex.Namespace)
args, err := ec.ResolveAnalysisRunArgs(analysis.Args)
if err != nil {
return nil, err
}
run, err := ec.newAnalysisRun(analysis, args, dryRunMetrics, measurementRetentionMetrics)
run, err := ec.newAnalysisRun(analysis, args, dryRunMetrics, measurementRetentionMetrics, analysisRunMetadata)
if err != nil {
return nil, err
}
@@ -635,7 +635,7 @@ func (ec *experimentContext) assessAnalysisRuns() (v1alpha1.AnalysisPhase, strin
}

// newAnalysisRun generates an AnalysisRun from the experiment and template
func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) {
func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) (*v1alpha1.AnalysisRun, error) {

if analysis.ClusterScope {
clusterTemplate, err := ec.clusterAnalysisTemplateLister.Get(analysis.TemplateName)
@@ -645,14 +645,26 @@ func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysis
name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name)

clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{clusterTemplate}
run, err := analysisutil.NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace)
if err != nil {
return nil, err
}
runLabels := map[string]string{}
runAnnotations := map[string]string{}

instanceID := analysisutil.GetInstanceID(ec.ex)
if instanceID != "" {
run.Labels = map[string]string{v1alpha1.LabelKeyControllerInstanceID: ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]}
runLabels[v1alpha1.LabelKeyControllerInstanceID] = ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]
}
if analysisRunMetadata != nil {
for k, v := range analysisRunMetadata.Labels {
runLabels[k] = v
}
for k, v := range analysisRunMetadata.Annotations {
runAnnotations[k] = v
}
}
run, err := analysisutil.NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, runLabels, runAnnotations, name, "", ec.ex.Namespace)
if err != nil {
return nil, err
}

run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ec.ex, controllerKind)}
return run, nil
} else {
Expand All @@ -663,14 +675,26 @@ func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysis
name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name)

analysisTemplates := []*v1alpha1.AnalysisTemplate{template}
run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, nil, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace)
if err != nil {
return nil, err
}
runLabels := map[string]string{}
runAnnotations := map[string]string{}
instanceID := analysisutil.GetInstanceID(ec.ex)
if instanceID != "" {
run.Labels = map[string]string{v1alpha1.LabelKeyControllerInstanceID: ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]}
runLabels[v1alpha1.LabelKeyControllerInstanceID] = ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]
}
if analysisRunMetadata != nil {
for k, v := range analysisRunMetadata.Labels {
runLabels[k] = v
}
for k, v := range analysisRunMetadata.Annotations {
runAnnotations[k] = v
}
}

run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, nil, args, dryRunMetrics, measurementRetentionMetrics, runLabels, runAnnotations, name, "", ec.ex.Namespace)
if err != nil {
return nil, err
}

run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ec.ex, controllerKind)}
return run, nil
}
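One detail of the label handling added above: the run's labels are seeded with the controller instance-ID entry first, and the user-supplied AnalysisRunMetadata labels are copied in afterwards, so a user label with a colliding key would take precedence. A minimal standalone sketch of that merge order, using a placeholder key rather than the real v1alpha1.LabelKeyControllerInstanceID constant:

// Illustrative sketch only, not code from this commit: it mirrors the merge
// order used in newAnalysisRun above (instance-ID label first, then the
// user-supplied AnalysisRunMetadata labels copied over it).
func mergeRunLabels(instanceID string, userLabels map[string]string) map[string]string {
	labels := map[string]string{}
	if instanceID != "" {
		labels["example.io/controller-instance-id"] = instanceID // placeholder key, not the real constant
	}
	for k, v := range userLabels {
		labels[k] = v
	}
	return labels
}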
11 changes: 11 additions & 0 deletions manifests/crds/experiment-crd.yaml
@@ -84,6 +84,17 @@ spec:
- templateName
type: object
type: array
analysisRunMetadata:
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
dryRun:
items:
properties:
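For reference, an Experiment manifest using the analysisRunMetadata field added to this CRD could look like the following sketch (template and metric names are placeholders; spec fields untouched by this commit are omitted):

apiVersion: argoproj.io/v1alpha1
kind: Experiment
metadata:
  name: example-experiment
spec:
  # templates: ... (unchanged fields omitted from this sketch)
  analyses:
  - name: success-rate
    templateName: success-rate
  dryRun:
  - metricName: success-rate
  analysisRunMetadata:
    labels:
      team: example-team
    annotations:
      purpose: dry-run-experiment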
20 changes: 20 additions & 0 deletions manifests/crds/rollout-crd.yaml
@@ -571,6 +571,26 @@ spec:
- templateName
type: object
type: array
analysisRunMetadata:
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
dryRun:
items:
properties:
metricName:
type: string
required:
- metricName
type: object
type: array
duration:
type: string
templates:
31 changes: 31 additions & 0 deletions manifests/install.yaml
@@ -8885,6 +8885,17 @@ spec:
- templateName
type: object
type: array
analysisRunMetadata:
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
dryRun:
items:
properties:
@@ -12020,6 +12031,26 @@ spec:
- templateName
type: object
type: array
analysisRunMetadata:
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
dryRun:
items:
properties:
metricName:
type: string
required:
- metricName
type: object
type: array
duration:
type: string
templates:
11 changes: 11 additions & 0 deletions pkg/apiclient/rollout/rollout.swagger.json
@@ -1355,6 +1355,17 @@
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutExperimentStepAnalysisTemplateRef"
},
"title": "Analyses reference which analysis templates to run with the experiment\n+patchMergeKey=name\n+patchStrategy=merge"
},
"dryRun": {
"type": "array",
"items": {
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun"
},
"title": "DryRun object contains the settings for running the analysis in Dry-Run mode\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional"
},
"analysisRunMetadata": {
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunMetadata",
"title": "AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns\n+optional"
}
},
"title": "RolloutExperimentStep defines a template that is used to create a experiment for a step"
1 change: 1 addition & 0 deletions pkg/apis/api-rules/violation_exceptions.list
@@ -35,6 +35,7 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,MeasurementRetention
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,Templates
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Analyses
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,DryRun
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Templates
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStepAnalysisTemplateRef,Args
API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,ALBs
3 changes: 3 additions & 0 deletions pkg/apis/rollouts/v1alpha1/experiment_types.go
@@ -65,6 +65,9 @@ type ExperimentSpec struct {
// +patchStrategy=merge
// +optional
MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,8,rep,name=measurementRetention"`
// AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns
// +optional
AnalysisRunMetadata AnalysisRunMetadata `json:"analysisRunMetadata,omitempty" protobuf:"bytes,9,opt,name=analysisRunMetadata"`
}

type TemplateSpec struct {
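The AnalysisRunMetadata type referenced by the new field is defined elsewhere in the package and is not part of this hunk; judging from how the test above populates it, its shape is presumably along these lines (a sketch, not the repository's actual declaration):

type AnalysisRunMetadata struct {
	// Labels to apply to the AnalysisRun object
	Labels map[string]string `json:"labels,omitempty"`
	// Annotations to apply to the AnalysisRun object
	Annotations map[string]string `json:"annotations,omitempty"`
}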