diff --git a/docs/pipelineruns.md b/docs/pipelineruns.md
index 7d905432514..901b46ebdf6 100644
--- a/docs/pipelineruns.md
+++ b/docs/pipelineruns.md
@@ -25,6 +25,9 @@ weight: 500
 - [Specifying a Pod template](#specifying-a-pod-template)
 - [Specifying taskRunSpecs](#specifying-taskrunspecs)
 - [Specifying Workspaces](#specifying-workspaces)
+  - [Propagated Workspaces](#propagated-workspaces)
+  - [Referenced Resources](#workspace-referenced-resources)
+  - [Embedded Specifications with Referenced Resources](#embedded-specifications-with-referenced-resources)
 - [Specifying LimitRange values](#specifying-limitrange-values)
 - [Configuring a failure timeout](#configuring-a-failure-timeout)
 - [PipelineRun status](#pipelinerun-status)
@@ -844,6 +847,518 @@ For more information, see the following topics:
 [`Custom tasks`](pipelines.md#using-custom-tasks) may or may not use workspaces.
 Consult the documentation of the custom task that you are using to determine whether it supports workspaces.
 
+#### Propagated Workspaces
+
+**([alpha only](https://github.com/tektoncd/pipeline/blob/main/docs/install.md#alpha-features))**
+
+When using an embedded spec, workspaces from the parent `PipelineRun` will be
+propagated to any inlined specs without needing to be explicitly defined. This
+allows authors to simplify specs by automatically propagating top-level
+workspaces down to other inlined resources.
+
+```yaml
+# Inline specifications of a PipelineRun
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sensitive-recipe-storage
+data:
+  brownies: |
+    My secret recipe!
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: secret-password
+type: Opaque
+data:
+  password: aHVudGVyMg==
+---
+apiVersion: tekton.dev/v1beta1
+kind: PipelineRun
+metadata:
+  generateName: recipe-time-
+spec:
+  params:
+    - name: filename
+      value: recipe.txt
+  workspaces:
+    - name: shared-data
+      volumeClaimTemplate:
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 16Mi
+          volumeMode: Filesystem
+    - name: recipe-store
+      configMap:
+        name: sensitive-recipe-storage
+        items:
+          - key: brownies
+            path: recipe.txt
+    - name: password-vault
+      secret:
+        secretName: secret-password
+  pipelineSpec:
+    #workspaces:
+    #  - name: password-vault
+    #  - name: recipe-store
+    #  - name: shared-data
+    tasks:
+      - name: fetch-secure-data
+        # workspaces:
+        #   - name: password-vault
+        #   - name: recipe-store
+        #   - name: shared-data
+        taskSpec:
+          # workspaces:
+          #   - name: password-vault
+          #   - name: recipe-store
+          #   - name: shared-data
+          steps:
+            - name: fetch-and-write-secure
+              image: ubuntu
+              script: |
+                if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then
+                  cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path)
+                  echo "success!!!"
+                else
+                  echo "wrong password!"
+                  exit 1
+                fi
+      - name: print-the-recipe
+        # workspaces:
+        #   - name: shared-data
+        runAfter:
+          - fetch-secure-data
+        taskSpec:
+          # workspaces:
+          #   - name: shared-data
+          steps:
+            - name: print-secrets
+              image: ubuntu
+              script: cat $(workspaces.shared-data.path)/$(params.filename)
+```
+
+When the `PipelineRun` executes, the workspaces are interpolated during resolution.
+The specifications are not mutated before storage, so they remain exactly as
+submitted; only the status is updated.
+
+```yaml
+# Successful execution of the above PipelineRun
+apiVersion: tekton.dev/v1beta1
+kind: PipelineRun
+metadata:
+  generateName: recipe-time-
+  ...
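+# Note: the spec stored below is identical to the spec that was submitted above;
+# the propagated workspace declarations only become visible in the TaskRuns under `status`.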
+spec: + params: + - name: filename + value: recipe.txt + pipelineSpec: + tasks: + - name: fetch-secure-data + taskSpec: + steps: + - image: ubuntu + name: fetch-and-write-secure + resources: {} + script: | + if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then + cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path) + echo "success!!!" + else + echo "wrong password!" + exit 1 + fi + - name: print-the-recipe + runAfter: + - fetch-secure-data + taskSpec: + steps: + - image: ubuntu + name: print-secrets + resources: {} + script: cat $(workspaces.shared-data.path)/$(params.filename) + workspaces: + - name: shared-data + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 16Mi + volumeMode: Filesystem + - configMap: + items: + - key: brownies + path: recipe.txt + name: sensitive-recipe-storage + name: recipe-store + - name: password-vault + secret: + secretName: secret-password +status: + completionTime: "2022-06-02T18:17:02Z" + conditions: + - lastTransitionTime: "2022-06-02T18:17:02Z" + message: 'Tasks Completed: 2 (Failed: 0, Canceled 0), Skipped: 0' + reason: Succeeded + status: "True" + type: Succeeded + pipelineSpec: + ... + taskRuns: + recipe-time-lslt9-fetch-secure-data: + pipelineTaskName: fetch-secure-data + status: + ... + taskSpec: + steps: + - image: ubuntu + name: fetch-and-write-secure + resources: {} + script: | + if [ "hunter2" = "$(cat /workspace/password-vault/password)" ]; then + cp /workspace/recipe-store/recipe.txt /workspace/shared-data + echo "success!!!" + else + echo "wrong password!" + exit 1 + fi + workspaces: + - name: password-vault + - name: recipe-store + - name: shared-data + recipe-time-lslt9-print-the-recipe: + pipelineTaskName: print-the-recipe + status: + ... + taskSpec: + steps: + - image: ubuntu + name: print-secrets + resources: {} + script: cat /workspace/shared-data/recipe.txt + workspaces: + - name: shared-data +``` + +##### Workspace Referenced Resources + +The propagation of `Workspaces` will not work for `referenced specifications` because the behavior would become opaque when users can't see the relationship between `Workspaces` declared in the referenced resources and the `Workspaces` supplied in runtime resources. Attempting to propagate `Workspaces` to referenced resources will cause failures. + +```yaml +# PipelineRun attempting to propagate Workspaces to referenced Tasks +apiVersion: v1 +kind: ConfigMap +metadata: + name: sensitive-recipe-storage +data: + brownies: | + My secret recipe! +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-password +type: Opaque +data: + password: aHVudGVyMg== +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: shared-task-storage +spec: + resources: + requests: + storage: 16Mi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: fetch-secure-data +spec: + steps: + - name: fetch-and-write + image: ubuntu + script: | + if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then + cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path) + else + echo "wrong password!" 
+ exit 1 + fi +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: print-data +spec: + params: + - name: filename + steps: + - name: print-secrets + image: ubuntu + script: cat $(workspaces.shared-data.path)/$(params.filename) +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: fetch-and-print-recipe +spec: + tasks: + - name: fetch-the-recipe + taskRef: + name: fetch-secure-data + - name: print-the-recipe + taskRef: + name: print-data + runAfter: + - fetch-the-recipe + params: + - name: filename + value: recipe.txt +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: recipe-time- +spec: + pipelineRef: + name: fetch-and-print-recipe + workspaces: + - name: password-vault + secret: + secretName: secret-password + - name: recipe-store + configMap: + name: sensitive-recipe-storage + items: + - key: brownies + path: recipe.txt + - name: shared-data + persistentVolumeClaim: + claimName: shared-task-storage +``` + +Upon execution, this will cause failures: + +```yaml +# Failed execution of the above PipelineRun + +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: recipe-time- + ... +spec: + pipelineRef: + name: fetch-and-print-recipe + workspaces: + - name: password-vault + secret: + secretName: secret-password + - configMap: + items: + - key: brownies + path: recipe.txt + name: sensitive-recipe-storage + name: recipe-store + - name: shared-data + persistentVolumeClaim: + claimName: shared-task-storage +status: + completionTime: "2022-06-02T19:02:58Z" + conditions: + - lastTransitionTime: "2022-06-02T19:02:58Z" + message: 'Tasks Completed: 1 (Failed: 1, Canceled 0), Skipped: 1' + reason: Failed + status: "False" + type: Succeeded + pipelineSpec: + ... + taskRuns: + recipe-time-v5scg-fetch-the-recipe: + pipelineTaskName: fetch-the-recipe + status: + completionTime: "2022-06-02T19:02:58Z" + conditions: + - lastTransitionTime: "2022-06-02T19:02:58Z" + message: | + "step-fetch-and-write" exited with code 1 (image: "docker.io/library/ubuntu@sha256:26c68657ccce2cb0a31b330cb0be2b5e108d467f641c62e13ab40cbec258c68d"); for logs run: kubectl -n default logs recipe-time-v5scg-fetch-the-recipe-pod -c step-fetch-and-write + reason: Failed + status: "False" + type: Succeeded + ... + taskSpec: + steps: + - image: ubuntu + name: fetch-and-write + resources: {} + script: | # See below: Replacements do not happen. + if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then + cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path) + else + echo "wrong password!" + exit 1 + fi +``` + +##### Embedded Specifications with Referenced Resources + +In the case of embedded specifications that have references, the `Workspaces` will be propagated up to the embedded specification level only. Users need to explicitly declare `Workspaces` wherever they reference resources. + +```yaml +# PipelineRun attempting to propagate Workspaces to referenced Tasks +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: fetch-secure-data +spec: + workspaces: # If Referenced, Workspaces need to be explicitly declared + - name: recipe-store + - name: shared-data + - name: password-vault + steps: + - name: fetch-and-write + image: ubuntu + script: | + if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then + cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path) + else + echo "wrong password!" 
+ exit 1 + fi +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: recipe-time- +spec: + workspaces: + - name: password-vault + secret: + secretName: secret-password + - name: recipe-store + configMap: + name: sensitive-recipe-storage + items: + - key: brownies + path: recipe.txt + - name: shared-data + persistentVolumeClaim: + claimName: shared-task-storage + pipelineSpec: + # workspaces: # Since this is embedded specs, Workspaces don’t need to be declared + # ... + tasks: + - name: fetch-the-recipe + workspaces: # If referencing resources, Workspaces need to be explicitly declared + - name: recipe-store + - name: shared-data + - name: password-vault + taskRef: # Referencing a resource + name: fetch-secure-data + - name: print-the-recipe + # workspaces: # Since this is embedded specs, Workspaces don’t need to be declared + # ... + taskSpec: + # workspaces: # Since this is embedded specs, Workspaces don’t need to be declared + # ... + params: + - name: filename + steps: + - name: print-secrets + image: ubuntu + script: cat $(workspaces.shared-data.path)/$(params.filename) + runAfter: + - fetch-the-recipe + params: + - name: filename + value: recipe.txt +``` + +The above pipelinerun successfully resolves to: + +```yaml +# Successful execution of the above PipelineRun +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: recipe-time- + ... +spec: + pipelineSpec: + ... + workspaces: + - name: password-vault + secret: + secretName: secret-password + - configMap: + items: + - key: brownies + path: recipe.txt + name: sensitive-recipe-storage + name: recipe-store + - name: shared-data + persistentVolumeClaim: + claimName: shared-task-storage +status: + completionTime: "2022-06-09T18:42:14Z" + conditions: + - lastTransitionTime: "2022-06-09T18:42:14Z" + message: 'Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0' + reason: Succeeded + status: "True" + type: Succeeded + pipelineSpec: + ... + taskRuns: + recipe-time-pj6l7-fetch-the-recipe: + pipelineTaskName: fetch-the-recipe + status: + ... + taskSpec: + steps: + - image: ubuntu + name: fetch-and-write + resources: {} + script: | + if [ "hunter2" = "$(cat /workspace/password-vault/password)" ]; then + cp /workspace/recipe-store/recipe.txt /workspace/shared-data + else + echo "wrong password!" + exit 1 + fi + workspaces: + - name: recipe-store + - name: shared-data + - name: password-vault + recipe-time-pj6l7-print-the-recipe: + pipelineTaskName: print-the-recipe + status: + ... + taskSpec: + params: + - name: filename + type: string + steps: + - image: ubuntu + name: print-secrets + resources: {} + script: cat /workspace/shared-data/recipe.txt + workspaces: + - name: shared-data +``` + ### Specifying `LimitRange` values In order to only consume the bare minimum amount of resources needed to execute one `Step` at a diff --git a/examples/v1beta1/pipelineruns/alpha/propagating-workspaces.yaml b/examples/v1beta1/pipelineruns/alpha/propagating-workspaces.yaml new file mode 100644 index 00000000000..f5c26e46f87 --- /dev/null +++ b/examples/v1beta1/pipelineruns/alpha/propagating-workspaces.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: sensitive-recipe-storage +data: + brownies: | + 1. Heat oven to 325 degrees F + 2. Melt 1/2 cup butter w/ 1/2 cup cocoa, stirring smooth. + 3. Remove from heat, allow to cool for a few minutes. + 4. Transfer to bowl. + 5. Whisk in 2 eggs, one at a time. + 6. Stir in vanilla. + 7. 
Separately combine 1 cup sugar, 1/4 cup flour, 1 cup chopped + walnuts and pinch of salt + 8. Combine mixtures. + 9. Bake in greased pan for 30 minutes. Watch carefully for + appropriate level of gooeyness. +--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-password +type: Opaque +data: + password: aHVudGVyMg== +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: recipe-time- +spec: + params: + - name: filename + value: recipe.txt + workspaces: + - name: shared-data + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 16Mi + volumeMode: Filesystem + - name: recipe-store + configMap: + name: sensitive-recipe-storage + items: + - key: brownies + path: recipe.txt + - name: password-vault + secret: + secretName: secret-password + pipelineSpec: + tasks: + - name: fetch-secure-data + taskSpec: + steps: + - name: fetch-and-write-secure + image: ubuntu + script: | + if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then + cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path) + echo "success!!!" + else + echo "wrong password!" + exit 1 + fi + - name: print-the-recipe + runAfter: + - fetch-secure-data + taskSpec: + steps: + - name: print-secrets + image: ubuntu + script: cat $(workspaces.shared-data.path)/$(params.filename) diff --git a/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/pkg/apis/pipeline/v1beta1/pipeline_validation.go index c96a86c5cd5..3378ad66d8e 100644 --- a/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -21,6 +21,7 @@ import ( "fmt" "strings" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" @@ -45,6 +46,7 @@ func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { // Validate checks that taskNames in the Pipeline are valid and that the graph // of Tasks expressed in the Pipeline makes sense. func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + cfg := config.FromContextOrDefaults(ctx) if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) { errs = errs.Also(apis.ErrGeneric("expected at least one, got none", "description", "params", "resources", "tasks", "workspaces")) } @@ -66,8 +68,10 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. 
errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { + errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + } // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results)) errs = errs.Also(validateTasksAndFinallySection(ps)) diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index e3963c209e5..43a27fa8ae9 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -51,6 +51,7 @@ import ( tresources "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/remote" + "github.com/tektoncd/pipeline/pkg/substitution" "github.com/tektoncd/pipeline/pkg/workspace" resolution "github.com/tektoncd/resolution/pkg/resource" "go.uber.org/zap" @@ -829,7 +830,7 @@ func (c *Reconciler) createTaskRun(ctx context.Context, taskRunName string, para var pipelinePVCWorkspaceName string var err error - tr.Spec.Workspaces, pipelinePVCWorkspaceName, err = getTaskrunWorkspaces(pr, rpt) + tr.Spec.Workspaces, pipelinePVCWorkspaceName, err = getTaskrunWorkspaces(ctx, pr, rpt) if err != nil { return nil, err } @@ -897,7 +898,7 @@ func (c *Reconciler) createRun(ctx context.Context, runName string, params []v1b } var pipelinePVCWorkspaceName string var err error - r.Spec.Workspaces, pipelinePVCWorkspaceName, err = getTaskrunWorkspaces(pr, rpt) + r.Spec.Workspaces, pipelinePVCWorkspaceName, err = getTaskrunWorkspaces(ctx, pr, rpt) if err != nil { return nil, err } @@ -912,16 +913,56 @@ func (c *Reconciler) createRun(ctx context.Context, runName string, params []v1b return c.PipelineClientSet.TektonV1alpha1().Runs(pr.Namespace).Create(ctx, r, metav1.CreateOptions{}) } -func getTaskrunWorkspaces(pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipelineTask) ([]v1beta1.WorkspaceBinding, string, error) { +func getTaskrunWorkspaces(ctx context.Context, pr *v1beta1.PipelineRun, rprt *resources.ResolvedPipelineTask) ([]v1beta1.WorkspaceBinding, string, error) { var workspaces []v1beta1.WorkspaceBinding var pipelinePVCWorkspaceName string pipelineRunWorkspaces := make(map[string]v1beta1.WorkspaceBinding) for _, binding := range pr.Spec.Workspaces { pipelineRunWorkspaces[binding.Name] = binding } - for _, ws := range rpt.PipelineTask.Workspaces { - taskWorkspaceName, pipelineTaskSubPath, pipelineWorkspaceName := ws.Name, ws.SubPath, ws.Workspace + if config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" { + // Propagate required workspaces from pipelineRun to the pipelineTasks + if rprt.PipelineTask.TaskSpec != nil { + workspacesUsedInSteps := []string{} + ts := rprt.PipelineTask.TaskSpec.TaskSpec + for _, step := range ts.Steps { + workspacesUsedInScript, _, errString := substitution.ExtractVariablesFromString(step.Script, "workspaces") + workspacesUsedInSteps = append(workspacesUsedInSteps, workspacesUsedInScript...) 
+ if errString != "" { + return nil, "", fmt.Errorf("Error while extracting workspace from Script: %s", errString) + } + for _, arg := range step.Args { + workspacesUsedInArg, _, errString := substitution.ExtractVariablesFromString(arg, "workspaces") + if errString != "" { + return nil, "", fmt.Errorf("Error while extracting workspace from Script: %s", errString) + } + workspacesUsedInSteps = append(workspacesUsedInSteps, workspacesUsedInArg...) + } + } + + ptw := []string{} + for _, ws := range rprt.PipelineTask.Workspaces { + ptw = append(ptw, ws.Name) + } + + for _, wspace := range workspacesUsedInSteps { + skip := false + for _, w := range ptw { + if w == wspace { + skip = true + break + } + } + if !skip { + rprt.PipelineTask.Workspaces = append(rprt.PipelineTask.Workspaces, v1beta1.WorkspacePipelineTaskBinding{Name: wspace}) + } + } + } + } + + for _, ws := range rprt.PipelineTask.Workspaces { + taskWorkspaceName, pipelineTaskSubPath, pipelineWorkspaceName := ws.Name, ws.SubPath, ws.Workspace pipelineWorkspace := pipelineWorkspaceName if pipelineWorkspaceName == "" { @@ -935,8 +976,8 @@ func getTaskrunWorkspaces(pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipeli workspaces = append(workspaces, taskWorkspaceByWorkspaceVolumeSource(b, taskWorkspaceName, pipelineTaskSubPath, *kmeta.NewControllerRef(pr))) } else { workspaceIsOptional := false - if rpt.ResolvedTaskResources != nil && rpt.ResolvedTaskResources.TaskSpec != nil { - for _, taskWorkspaceDeclaration := range rpt.ResolvedTaskResources.TaskSpec.Workspaces { + if rprt.ResolvedTaskResources != nil && rprt.ResolvedTaskResources.TaskSpec != nil { + for _, taskWorkspaceDeclaration := range rprt.ResolvedTaskResources.TaskSpec.Workspaces { if taskWorkspaceDeclaration.Name == taskWorkspaceName && taskWorkspaceDeclaration.Optional { workspaceIsOptional = true break @@ -944,7 +985,7 @@ func getTaskrunWorkspaces(pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipeli } } if !workspaceIsOptional { - return nil, "", fmt.Errorf("expected workspace %q to be provided by pipelinerun for pipeline task %q", pipelineWorkspace, rpt.PipelineTask.Name) + return nil, "", fmt.Errorf("expected workspace %q to be provided by pipelinerun for pipeline task %q", pipelineWorkspace, rprt.PipelineTask.Name) } } diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 40d2c279c2e..b348b7bec3f 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -7262,6 +7262,7 @@ func checkPipelineRunConditionStatusAndReason(t *testing.T, reconciledRun *v1bet } func TestGetTaskrunWorkspaces_Failure(t *testing.T) { + ctx := getContextBasedOnFeatureFlag("alpha") tests := []struct { name string pr *v1beta1.PipelineRun @@ -7309,7 +7310,7 @@ spec: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, _, err := getTaskrunWorkspaces(tt.pr, tt.rprt) + _, _, err := getTaskrunWorkspaces(ctx, tt.pr, tt.rprt) if err == nil { t.Errorf("Pipeline.getTaskrunWorkspaces() did not return error for invalid workspace") @@ -7322,6 +7323,7 @@ spec: } func TestGetTaskrunWorkspaces_Success(t *testing.T) { + ctx := getContextBasedOnFeatureFlag("alpha") tests := []struct { name string pr *v1beta1.PipelineRun @@ -7365,7 +7367,7 @@ spec: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, _, err := getTaskrunWorkspaces(tt.pr, tt.rprt) + _, _, err := getTaskrunWorkspaces(ctx, tt.pr, tt.rprt) if err != nil { t.Errorf("Pipeline.getTaskrunWorkspaces() 
returned error for valid pipeline: %v", err) @@ -9138,3 +9140,14 @@ spec: func lessTaskResourceBindings(i, j v1beta1.TaskResourceBinding) bool { return i.Name < j.Name } + +func getContextBasedOnFeatureFlag(featureFlag string) context.Context { + featureFlags, _ := config.NewFeatureFlagsFromMap(map[string]string{ + "enable-api-fields": featureFlag, + }) + cfg := &config.Config{ + FeatureFlags: featureFlags, + } + + return config.ToContext(context.Background(), cfg) +} diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index ef8eee4f9a0..d63661f33e4 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -378,7 +378,7 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1beta1.TaskRun) (*v1beta1 return nil, nil, controller.NewPermanentError(err) } - if err := workspace.ValidateBindings(taskSpec.Workspaces, tr.Spec.Workspaces); err != nil { + if err := workspace.ValidateBindings(ctx, taskSpec.Workspaces, tr.Spec.Workspaces); err != nil { logger.Errorf("TaskRun %q workspaces are invalid: %v", tr.Name, err) tr.Status.MarkResourceFailed(podconvert.ReasonFailedValidation, err) return nil, nil, controller.NewPermanentError(err) @@ -417,13 +417,20 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re defer c.durationAndCountMetrics(ctx, tr) logger := logging.FromContext(ctx) recorder := controller.GetEventRecorder(ctx) + var err error + + // Get the randomized volume names assigned to workspace bindings + workspaceVolumes := workspace.CreateVolumes(tr.Spec.Workspaces) - ts := updateTaskSpecParamsContextsResults(tr, rtr) + ts, err := updateTaskSpecParamsContextsResults(ctx, tr, rtr, workspaceVolumes) + if err != nil { + logger.Errorf("Error updating task spec parameters, contexts, results and workspaces: %s", err) + return err + } tr.Status.TaskSpec = ts // Get the TaskRun's Pod if it should have one. Otherwise, create the Pod. var pod *corev1.Pod - var err error if tr.Status.PodName != "" { pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName) @@ -469,7 +476,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re // This is used by createPod below. Changes to the Spec are not updated. 
tr.Spec.Workspaces = taskRunWorkspaces } - pod, err = c.createPod(ctx, ts, tr, rtr) + pod, err = c.createPod(ctx, ts, tr, rtr, workspaceVolumes) if err != nil { newErr := c.handlePodCreationError(tr, err) logger.Errorf("Failed to create task run pod for taskrun %q: %v", tr.Name, newErr) @@ -651,7 +658,7 @@ func (c *Reconciler) failTaskRun(ctx context.Context, tr *v1beta1.TaskRun, reaso // createPod creates a Pod based on the Task's configuration, with pvcName as a volumeMount // TODO(dibyom): Refactor resource setup/substitution logic to its own function in the resources package -func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1beta1.TaskRun, rtr *resources.ResolvedTaskResources) (*corev1.Pod, error) { +func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1beta1.TaskRun, rtr *resources.ResolvedTaskResources, workspaceVolumes map[string]corev1.Volume) (*corev1.Pod, error) { logger := logging.FromContext(ctx) inputResources, err := resourceImplBinding(rtr.Inputs, c.Images) if err != nil { @@ -687,18 +694,13 @@ func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1 ts = resources.ApplyResources(ts, inputResources, "inputs") ts = resources.ApplyResources(ts, outputResources, "outputs") - // Get the randomized volume names assigned to workspace bindings - workspaceVolumes := workspace.CreateVolumes(tr.Spec.Workspaces) - - // Apply workspace resource substitution - ts = resources.ApplyWorkspaces(ctx, ts, ts.Workspaces, tr.Spec.Workspaces, workspaceVolumes) - if validateErr := ts.Validate(ctx); validateErr != nil { logger.Errorf("Failed to create a pod for taskrun: %s due to task validation error %v", tr.Name, validateErr) return nil, validateErr } ts, err = workspace.Apply(ctx, *ts, tr.Spec.Workspaces, workspaceVolumes) + if err != nil { logger.Errorf("Failed to create a pod for taskrun: %s due to workspace error %v", tr.Name, err) return nil, err @@ -740,7 +742,7 @@ func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1 return pod, err } -func updateTaskSpecParamsContextsResults(tr *v1beta1.TaskRun, rtr *resources.ResolvedTaskResources) *v1beta1.TaskSpec { +func updateTaskSpecParamsContextsResults(ctx context.Context, tr *v1beta1.TaskRun, rtr *resources.ResolvedTaskResources, workspaceVolumes map[string]corev1.Volume) (*v1beta1.TaskSpec, error) { ts := rtr.TaskSpec.DeepCopy() var defaults []v1beta1.ParamSpec if len(ts.Params) > 0 { @@ -758,7 +760,30 @@ func updateTaskSpecParamsContextsResults(tr *v1beta1.TaskRun, rtr *resources.Res // Apply step exitCode path substitution ts = resources.ApplyStepExitCodePath(ts) - return ts + // Apply workspace resource substitution + if config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" { + // propagate workspaces from taskrun to task. 
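+		// Any workspace that is bound on the TaskRun but not declared in the Task spec gets an
+		// implicit declaration appended below, so the usual workspace variable substitution can resolve it.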
+		twn := []string{}
+		for _, tw := range ts.Workspaces {
+			twn = append(twn, tw.Name)
+		}
+
+		for _, trw := range tr.Spec.Workspaces {
+			skip := false
+			for _, tw := range twn {
+				if tw == trw.Name {
+					skip = true
+					break
+				}
+			}
+			if !skip {
+				ts.Workspaces = append(ts.Workspaces, v1beta1.WorkspaceDeclaration{Name: trw.Name})
+			}
+		}
+	}
+	ts = resources.ApplyWorkspaces(ctx, ts, ts.Workspaces, tr.Spec.Workspaces, workspaceVolumes)
+
+	return ts, nil
 }
 
 func isExceededResourceQuotaError(err error) bool {
diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go
index 878e9a7bf34..742e85728fc 100644
--- a/pkg/reconciler/taskrun/taskrun_test.go
+++ b/pkg/reconciler/taskrun/taskrun_test.go
@@ -43,6 +43,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
 	ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
 	"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
+	"github.com/tektoncd/pipeline/pkg/workspace"
 	"github.com/tektoncd/pipeline/test"
 	"github.com/tektoncd/pipeline/test/diff"
 	eventstest "github.com/tektoncd/pipeline/test/events"
@@ -2297,8 +2298,13 @@ spec:
 		Kind: "Task",
 		TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces},
 	}
-	taskSpec := updateTaskSpecParamsContextsResults(taskRun, rtr)
-	pod, err := r.createPod(testAssets.Ctx, taskSpec, taskRun, rtr)
+	ctx := getContextBasedOnFeatureFlag("alpha")
+	workspaceVolumes := workspace.CreateVolumes(taskRun.Spec.Workspaces)
+	taskSpec, err := updateTaskSpecParamsContextsResults(ctx, taskRun, rtr, workspaceVolumes)
+	if err != nil {
+		t.Fatalf("update task spec threw error %v", err)
+	}
+	pod, err := r.createPod(testAssets.Ctx, taskSpec, taskRun, rtr, workspaceVolumes)
 
 	if err != nil {
 		t.Fatalf("create pod threw error %v", err)
@@ -2401,8 +2407,13 @@ spec:
 		TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces},
 	}
 
-	taskSpec := updateTaskSpecParamsContextsResults(taskRun, rtr)
-	_, err := r.createPod(testAssets.Ctx, taskSpec, taskRun, rtr)
+	ctx := getContextBasedOnFeatureFlag("alpha")
+	workspaceVolumes := workspace.CreateVolumes(taskRun.Spec.Workspaces)
+	taskSpec, err := updateTaskSpecParamsContextsResults(ctx, taskRun, rtr, workspaceVolumes)
+	if err != nil {
+		t.Errorf("update task spec threw an error: %v", err)
+	}
+	_, err = r.createPod(testAssets.Ctx, taskSpec, taskRun, rtr, workspaceVolumes)
 
 	if err == nil || err.Error() != expectedError {
 		t.Errorf("Expected to fail validation for duplicate Workspace mount paths, error was %v", err)
@@ -4482,3 +4493,14 @@ func objectMeta(name, ns string) metav1.ObjectMeta {
 		Annotations: map[string]string{},
 	}
 }
+
+func getContextBasedOnFeatureFlag(featureFlag string) context.Context {
+	featureFlags, _ := config.NewFeatureFlagsFromMap(map[string]string{
+		"enable-api-fields": featureFlag,
+	})
+	cfg := &config.Config{
+		FeatureFlags: featureFlags,
+	}
+
+	return config.ToContext(context.Background(), cfg)
+}
diff --git a/pkg/substitution/substitution.go b/pkg/substitution/substitution.go
index 08f1c4e9452..72c57963dff 100644
--- a/pkg/substitution/substitution.go
+++ b/pkg/substitution/substitution.go
@@ -7,7 +7,7 @@ You may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and @@ -30,7 +30,7 @@ const braceMatchingRegex = "(\\$(\\(%s(\\.(?P%s)|\\[\"(?P%s)\"\\]|\\ // ValidateVariable makes sure all variables in the provided string are known func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := extractVariablesFromString(value, prefix); present { + if vs, present, _ := ExtractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if !vars.Has(v) { @@ -46,7 +46,7 @@ func ValidateVariable(name, value, prefix, locationName, path string, vars sets. // ValidateVariableP makes sure all variables for a parameter in the provided string are known func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present, errString := extractVariablesFromString(value, prefix); present { + if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, @@ -70,7 +70,7 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError // ValidateVariableProhibited verifies that variables matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := extractVariablesFromString(value, prefix); present { + if vs, present, _ := ExtractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { @@ -86,7 +86,7 @@ func ValidateVariableProhibited(name, value, prefix, locationName, path string, // ValidateVariableProhibitedP verifies that variables for a parameter matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present, errString := extractVariablesFromString(value, prefix); present { + if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, @@ -135,7 +135,7 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * // ValidateVariableIsolated verifies that variables matching the relevant string expressions are completely isolated if present. func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := extractVariablesFromString(value, prefix); present { + if vs, present, _ := ExtractVariablesFromString(value, prefix); present { firstMatch, _ := extractExpressionFromString(value, prefix) for _, v := range vs { v = strings.TrimSuffix(v, "[*]") @@ -154,7 +154,7 @@ func ValidateVariableIsolated(name, value, prefix, locationName, path string, va // ValidateVariableIsolatedP verifies that variables matching the relevant string expressions are completely isolated if present. 
func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present, errString := extractVariablesFromString(value, prefix); present { + if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, @@ -191,7 +191,8 @@ func extractExpressionFromString(s, prefix string) (string, bool) { return match[0], true } -func extractVariablesFromString(s, prefix string) ([]string, bool, string) { +// ExtractVariablesFromString extracts variables from an input string via regex matching. +func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re := regexp.MustCompile(pattern) matches := re.FindAllStringSubmatch(s, -1) diff --git a/pkg/workspace/apply.go b/pkg/workspace/apply.go index d7d0e38ed18..fb679d090fe 100644 --- a/pkg/workspace/apply.go +++ b/pkg/workspace/apply.go @@ -119,6 +119,19 @@ func Apply(ctx context.Context, ts v1beta1.TaskSpec, wb []v1beta1.WorkspaceBindi } for i := range wb { + if alphaAPIEnabled { + // Propagate missing Workspaces + addWorkspace := true + for _, ws := range ts.Workspaces { + if ws.Name == wb[i].Name { + addWorkspace = false + break + } + } + if addWorkspace { + ts.Workspaces = append(ts.Workspaces, v1beta1.WorkspaceDeclaration{Name: wb[i].Name}) + } + } w, err := getDeclaredWorkspace(wb[i].Name, ts.Workspaces) if err != nil { return nil, err diff --git a/pkg/workspace/validate.go b/pkg/workspace/validate.go index 570934aad25..98a676e275d 100644 --- a/pkg/workspace/validate.go +++ b/pkg/workspace/validate.go @@ -20,13 +20,14 @@ import ( "context" "fmt" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "k8s.io/apimachinery/pkg/util/sets" ) // ValidateBindings will return an error if the bound workspaces in binds don't satisfy the declared // workspaces in decls. -func ValidateBindings(decls []v1beta1.WorkspaceDeclaration, binds []v1beta1.WorkspaceBinding) error { +func ValidateBindings(ctx context.Context, decls []v1beta1.WorkspaceDeclaration, binds []v1beta1.WorkspaceBinding) error { // This will also be validated at webhook time but in case the webhook isn't invoked for some // reason we'll invoke the same validation here. for _, b := range binds { @@ -53,7 +54,8 @@ func ValidateBindings(decls []v1beta1.WorkspaceDeclaration, binds []v1beta1.Work } } for _, bind := range binds { - if !declNames.Has(bind.Name) { + if !declNames.Has(bind.Name) && config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields != "alpha" { + // Skip this validation if propagating workspaces since we will propagate as we continue. return fmt.Errorf("workspace binding %q does not match any declared workspace", bind.Name) } } diff --git a/pkg/workspace/validate_test.go b/pkg/workspace/validate_test.go index cd4894d0b8a..30df714f853 100644 --- a/pkg/workspace/validate_test.go +++ b/pkg/workspace/validate_test.go @@ -17,14 +17,17 @@ limitations under the License. 
package workspace import ( + "context" "errors" "testing" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" ) func TestValidateBindingsValid(t *testing.T) { + ctx := getContextBasedOnFeatureFlag("alpha") for _, tc := range []struct { name string declarations []v1beta1.WorkspaceDeclaration @@ -76,7 +79,7 @@ func TestValidateBindingsValid(t *testing.T) { bindings: []v1beta1.WorkspaceBinding{}, }} { t.Run(tc.name, func(t *testing.T) { - if err := ValidateBindings(tc.declarations, tc.bindings); err != nil { + if err := ValidateBindings(ctx, tc.declarations, tc.bindings); err != nil { t.Errorf("didnt expect error for valid bindings but got: %v", err) } }) @@ -85,23 +88,12 @@ func TestValidateBindingsValid(t *testing.T) { } func TestValidateBindingsInvalid(t *testing.T) { + ctx := getContextBasedOnFeatureFlag("alpha") for _, tc := range []struct { name string declarations []v1beta1.WorkspaceDeclaration bindings []v1beta1.WorkspaceBinding }{{ - name: "Didn't provide binding matching declared workspace", - declarations: []v1beta1.WorkspaceDeclaration{{ - Name: "beth", - }}, - bindings: []v1beta1.WorkspaceBinding{{ - Name: "kate", - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, { - Name: "beth", - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }}, - }, { name: "Provided a binding that wasn't needed", declarations: []v1beta1.WorkspaceDeclaration{{ Name: "randall", @@ -143,7 +135,7 @@ func TestValidateBindingsInvalid(t *testing.T) { }}, }} { t.Run(tc.name, func(t *testing.T) { - if err := ValidateBindings(tc.declarations, tc.bindings); err == nil { + if err := ValidateBindings(ctx, tc.declarations, tc.bindings); err == nil { t.Errorf("expected error for invalid bindings but didn't get any!") } }) @@ -235,3 +227,14 @@ func TestValidateOnlyOnePVCIsUsed_Invalid(t *testing.T) { }) } } + +func getContextBasedOnFeatureFlag(featureFlag string) context.Context { + featureFlags, _ := config.NewFeatureFlagsFromMap(map[string]string{ + "enable-api-fields": featureFlag, + }) + cfg := &config.Config{ + FeatureFlags: featureFlags, + } + + return config.ToContext(context.Background(), cfg) +}