From 0d07da43daf4a0263f80f67309846e748ea1ac35 Mon Sep 17 00:00:00 2001 From: Scott Date: Mon, 6 Jan 2020 13:55:47 -0500 Subject: [PATCH] This commit introduces workspaces for Pipelines. A workspace can be threaded through the tasks of a Pipeline, allowing easier reuse and customisation of shared storage between tasks. Workspace types have been added to Pipeline structs: * A Pipeline declares a list of workspace names that it requires be provided by a PipelineRun. * A PipelineTask declares a mapping from a Pipeline's workspace name to a workspace name declared by the Task it references (or embeds). * A PipelineRun declares the concrete workspace implementations - whether they be a PVC, emptyDir, configMap or secret - and passes those in with the name declared by the Pipeline it references (or embeds). An example PipelineRun has been added to demonstrate usage of the workspaces in pipelines. Outstanding TODOs: * Add tests --- docs/pipelineruns.md | 89 +++++++++++- docs/pipelines.md | 58 +++++++- docs/taskruns.md | 6 +- examples/pipelineruns/workspaces.yaml | 136 ++++++++++++++++++ pkg/apis/pipeline/v1alpha1/pipeline_types.go | 9 ++ .../pipeline/v1alpha1/pipeline_validation.go | 34 +++++ .../v1alpha1/pipeline_validation_test.go | 23 +++ .../pipeline/v1alpha1/pipelinerun_types.go | 5 +- .../v1alpha1/pipelinerun_validation.go | 13 ++ .../v1alpha1/pipelinerun_validation_test.go | 19 +++ pkg/apis/pipeline/v1alpha1/workspace_types.go | 8 ++ .../v1alpha1/zz_generated.deepcopy.go | 17 +++ pkg/apis/pipeline/v1alpha2/workspace_types.go | 16 +++ .../v1alpha2/zz_generated.deepcopy.go | 32 +++++ pkg/reconciler/pipelinerun/pipelinerun.go | 30 ++++ .../resources/pipelinerunresolution.go | 15 ++ .../resources/pipelinerunresolution_test.go | 12 ++ test/builder/pipeline.go | 28 ++++ test/builder/pipeline_test.go | 9 ++ test/workspace_test.go | 101 +++++++++++++ 20 files changed, 651 insertions(+), 9 deletions(-) create mode 100644 examples/pipelineruns/workspaces.yaml diff --git 
a/docs/pipelineruns.md b/docs/pipelineruns.md index def962d0e01..08adaf2d915 100644 --- a/docs/pipelineruns.md +++ b/docs/pipelineruns.md @@ -270,9 +270,92 @@ internally generated persistent volume claims. ## Workspaces -It is not yet possible to specify [workspaces](tasks.md#workspaces) via `Pipelines` -or `PipelineRuns`, so `Tasks` requiring `workspaces` cannot be used with them until -[#1438](https://github.com/tektoncd/pipeline/issues/1438) is completed. +Workspaces allow PVC, emptyDir, configmap and secret volume sources to be +easily wired into tasks and pipelines. + +For a `PipelineRun` to execute [a Pipeline that declares `workspaces`](pipelines.md#workspaces), +at runtime you need to map the `workspace` names to actual physical volumes. +This is managed through the `PipelineRun`'s `workspaces` field. Values in `workspaces` are +[`Volumes`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/), +however currently we only support a subset of `VolumeSources`: + +_If you need support for a `VolumeSource` not listed here +[please open an issue](https://github.com/tektoncd/pipeline/issues) or feel free to +[contribute a PR](https://github.com/tektoncd/pipeline/blob/master/CONTRIBUTING.md)._ + +* [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) +* [`persistentVolumeClaim`](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) +* [`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) +* [`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) + +If the `workspaces` declared in the Pipeline are not provided at runtime, the `PipelineRun` will fail +with an error. 
+ +For example to provide an existing PVC called `mypvc` for a `workspace` called +`myworkspace` declared by the `Pipeline`, using the `my-subdir` folder which already exists +on the PVC (there will be an error if it does not exist): + +```yaml +workspaces: +- name: myworkspace + persistentVolumeClaim: + claimName: mypvc + subPath: my-subdir +``` + +An [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) can also be used for +this with the following caveats: + +1. An `emptyDir` volume type is not _shared_ amongst Tasks. Instead each Task will simply receive a +new `emptyDir` of its own from its underlying Pod. + +```yaml +workspaces: +- name: myworkspace + emptyDir: {} +``` + +A [`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) can also be used +as a workspace with the following caveats: + +1. ConfigMap volume sources are always mounted as read-only inside a task's +containers - tasks cannot write content to them and a step may error out +and fail the task if a write is attempted. +2. The ConfigMap you want to use as a workspace must already exist prior +to the TaskRun being submitted. +3. ConfigMaps have a [size limit of 1MB](https://github.com/kubernetes/kubernetes/blob/f16bfb069a22241a5501f6fe530f5d4e2a82cf0e/pkg/apis/core/validation/validation.go#L5042) + +To use a [`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) +as a `workspace`: + +```yaml +workspaces: +- name: myworkspace + configMap: + name: my-configmap +``` + +A [`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) can also be used as a +workspace with the following caveats: + +1. Secret volume sources are always mounted as read-only inside a task's +containers - tasks cannot write content to them and a step may error out +and fail the task if a write is attempted. +2. The Secret you want to use as a workspace must already exist prior +to the TaskRun being submitted. +3.
Secrets have a [size limit of 1MB](https://github.com/kubernetes/kubernetes/blob/f16bfb069a22241a5501f6fe530f5d4e2a82cf0e/pkg/apis/core/validation/validation.go#L4933) + +To use a [`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) +as a `workspace`: + +```yaml +workspaces: +- name: myworkspace + secret: + secretName: my-secret +``` + +_For a complete example see [workspaces.yaml](../examples/pipelineruns/workspaces.yaml)._ ## Cancelling a PipelineRun diff --git a/docs/pipelines.md b/docs/pipelines.md index cbaf6124e78..3c04e66c7c1 100644 --- a/docs/pipelines.md +++ b/docs/pipelines.md @@ -75,9 +75,61 @@ spec: ### Declared Workspaces -It is not yet possible to specify [workspaces](tasks.md#workspaces) via `Pipelines` -or `PipelineRuns`, so `Tasks` requiring `workspaces` cannot be used with them until -[#1438](https://github.com/tektoncd/pipeline/issues/1438) is completed. +`workspaces` are a way of declaring volumes you expect to be made available to your +executing `Pipeline` and its `Task`s. They are similar to [`volumes`](#volumes) but +allow you to enforce at runtime that the volumes have been attached and +[allow you to specify subpaths](taskruns.md#workspaces) in the volumes to attach. + +Any `Pipeline` using a `Task` that declares a workspace will need to provide one at +runtime. Doing so requires two additions in a Pipeline: + +1. The `Pipeline` will need to declare a list of `workspaces` that `PipelineRun`s will be +expected to provide. This is done with the `workspaces` field in the `Pipeline`'s spec. +Each entry in that list must have a unique name. +2. When a `Pipeline` refers to a `Task` requiring workspaces, one of the named workspaces +from (1) will need to be provided. The workspace name needs to be mapped from the name +given to it by the pipeline to the name expected by the task.
+ +In total this looks as follows: + +```yaml +spec: + workspaces: + - name: pipeline-ws1 # The name of a workspace provided by PipelineRuns + tasks: + - name: use-ws-from-pipeline + taskRef: + name: gen-code # gen-code task expects a workspace be provided with name "output" + workspaces: + - name: output + workspace: pipeline-ws1 + - name: use-ws-again + taskRef: + name: commit # commit task expects a workspace be provided with name "src" + workspaces: + - name: src + workspace: pipeline-ws1 +``` + +This will tell Tekton to take whatever workspace is provided by the PipelineRun +with name "pipeline-ws1" and wire it into the "output" workspace expected by +the gen-code task. The same workspace will then also be wired into the "src" workspace +expected by the commit task. If the workspace provided by the PipelineRun is a +persistent volume claim then we have successfully shared files between the two tasks! + +#### Workspaces Don't Imply Task Ordering (Yet) + +One use case for workspaces in `Pipeline`s is to provide a PVC to multiple `Task`s +and have one or some write to it before the others read from it. This kind of behaviour +relies on the order of the `Task`s - one writes, the next reads, and so on - but this +ordering is not currently enforced by Tekton. This means that `Task`s which write to a +PVC may be run at the same time as `Task`s expecting to read that data. In the worst case +this can result in deadlock behaviour where multiple `Task`'s pods are all attempting +to mount a PVC for writing at the same time. + +To avoid this situation `Pipeline` authors can explicitly declare the ordering of `Task`s +sharing a PVC-backed workspace by using the `runAfter` field. See [the section on +`runAfter`](#runAfter) for more information about using this field.
### Parameters diff --git a/docs/taskruns.md b/docs/taskruns.md index b6424465b44..2aa3d07f06b 100644 --- a/docs/taskruns.md +++ b/docs/taskruns.md @@ -232,17 +232,17 @@ at runtime you need to map the `workspaces` to actual physical volumes with * [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) * [`persistentVolumeClaim`](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) * [`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) +* [`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) _If you need support for a `VolumeSource` not listed here [please open an issue](https://github.com/tektoncd/pipeline/issues) or feel free to [contribute a PR](https://github.com/tektoncd/pipeline/blob/master/CONTRIBUTING.md)._ - If the declared `workspaces` are not provided at runtime, the `TaskRun` will fail with an error. For example to provide an existing PVC called `mypvc` for a `workspace` called -`myworkspace` declared by the `Pipeline`, using the `my-subdir` folder which already exists +`myworkspace` declared by the `Task`, using the `my-subdir` folder which already exists on the PVC (there will be an error if it does not exist): ```yaml @@ -268,6 +268,7 @@ containers - tasks cannot write content to them and a step may error out and fail the task if a write is attempted. 2. The ConfigMap you want to use as a workspace must already exist prior to the TaskRun being submitted. +3. ConfigMaps have a [size limit of 1MB](https://github.com/kubernetes/kubernetes/blob/f16bfb069a22241a5501f6fe530f5d4e2a82cf0e/pkg/apis/core/validation/validation.go#L5042) To use a [`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap) as a `workspace`: @@ -286,6 +287,7 @@ containers - tasks cannot write content to them and a step may error out and fail the task if a write is attempted. 2. The Secret you want to use as a workspace must already exist prior to the TaskRun being submitted. +3. 
Secrets have a [size limit of 1MB](https://github.com/kubernetes/kubernetes/blob/f16bfb069a22241a5501f6fe530f5d4e2a82cf0e/pkg/apis/core/validation/validation.go#L4933) To use a [`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret) as a `workspace`: diff --git a/examples/pipelineruns/workspaces.yaml b/examples/pipelineruns/workspaces.yaml new file mode 100644 index 00000000000..c9d5457477d --- /dev/null +++ b/examples/pipelineruns/workspaces.yaml @@ -0,0 +1,136 @@ +# In this contrived example 3 different kinds of workspace volume are used to thread +# data through a pipeline's tasks. +# 1. A ConfigMap is used as source of recipe data. +# 2. A Secret is used to store a password. +# 3. A PVC is used to share data from one task to the next. +# +# The end result is a pipeline that first checks if the password is correct and, if so, +# copies data out of a recipe store onto a shared volume. The recipe data is then read +# by a subsequent task and printed to screen. +apiVersion: v1 +kind: ConfigMap +metadata: + name: sensitive-recipe-storage +data: + brownies: | + 1. Heat oven to 325 degrees F + 2. Melt 1/2 cup butter w/ 1/2 cup cocoa, stirring smooth. + 3. Remove from heat, allow to cool for a few minutes. + 4. Transfer to bowl. + 5. Whisk in 2 eggs, one at a time. + 6. Stir in vanilla. + 7. Separately combine 1 cup sugar, 1/4 cup flour, 1 cup chopped + walnuts and pinch of salt + 8. Combine mixtures. + 9. Bake in greased pan for 30 minutes. Watch carefully for + appropriate level of gooeyness. 
+--- +apiVersion: v1 +kind: Secret +metadata: + name: secret-password +type: Opaque +data: + password: aHVudGVyMg== +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: shared-task-storage +spec: + resources: + requests: + storage: 16Mi + volumeMode: Filesystem + accessModes: + - ReadWriteOnce +--- +apiVersion: tekton.dev/v1alpha1 +kind: Task +metadata: + name: fetch-secure-data +spec: + workspaces: + - name: super-secret-password + - name: secure-store + - name: filedrop + steps: + - name: fetch-and-write + image: ubuntu + script: | + if [ "hunter2" = "$(cat $(workspaces.super-secret-password.path)/password)" ]; then + cp $(workspaces.secure-store.path)/recipe.txt $(workspaces.filedrop.path) + else + echo "wrong password!" + exit 1 + fi +--- +apiVersion: tekton.dev/v1alpha1 +kind: Task +metadata: + name: print-data +spec: + workspaces: + - name: storage + readOnly: true + inputs: + params: + - name: filename + steps: + - name: print-secrets + image: ubuntu + script: cat $(workspaces.storage.path)/$(inputs.params.filename) +--- +apiVersion: tekton.dev/v1alpha1 +kind: Pipeline +metadata: + name: fetch-and-print-recipe +spec: + workspaces: + - name: password-vault + - name: recipe-store + - name: shared-data + tasks: + - name: fetch-the-recipe + taskRef: + name: fetch-secure-data + workspaces: + - name: super-secret-password + workspace: password-vault + - name: secure-store + workspace: recipe-store + - name: filedrop + workspace: shared-data + - name: print-the-recipe + taskRef: + name: print-data + # Note: this is currently required to ensure order of write / read on PVC is correct. 
+ runAfter: + - fetch-the-recipe + params: + - name: filename + value: recipe.txt + workspaces: + - name: storage + workspace: shared-data +--- +apiVersion: tekton.dev/v1alpha1 +kind: PipelineRun +metadata: + generateName: recipe-time- +spec: + pipelineRef: + name: fetch-and-print-recipe + workspaces: + - name: password-vault + secret: + secretName: secret-password + - name: recipe-store + configMap: + name: sensitive-recipe-storage + items: + - key: brownies + path: recipe.txt + - name: shared-data + persistentVolumeClaim: + claimName: shared-task-storage diff --git a/pkg/apis/pipeline/v1alpha1/pipeline_types.go b/pkg/apis/pipeline/v1alpha1/pipeline_types.go index 721c86ea241..a087a0164aa 100644 --- a/pkg/apis/pipeline/v1alpha1/pipeline_types.go +++ b/pkg/apis/pipeline/v1alpha1/pipeline_types.go @@ -32,6 +32,10 @@ type PipelineSpec struct { // Params declares a list of input parameters that must be supplied when // this Pipeline is run. Params []ParamSpec `json:"params,omitempty"` + // Workspaces declares a set of named workspaces that are expected to be + // provided by a PipelineRun. + // +optional + Workspaces []WorkspacePipelineDeclaration `json:"workspaces,omitempty"` } // Check that Pipeline may be validated and defaulted. @@ -123,6 +127,11 @@ type PipelineTask struct { // Parameters declares parameters passed to this task. // +optional Params []Param `json:"params,omitempty"` + + // Workspaces maps workspaces from the pipeline spec to the workspaces + // declared in the Task. 
+ // +optional + Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` } func (pt PipelineTask) HashKey() string { diff --git a/pkg/apis/pipeline/v1alpha1/pipeline_validation.go b/pkg/apis/pipeline/v1alpha1/pipeline_validation.go index 30e962655c7..b0b6aa00377 100644 --- a/pkg/apis/pipeline/v1alpha1/pipeline_validation.go +++ b/pkg/apis/pipeline/v1alpha1/pipeline_validation.go @@ -68,6 +68,7 @@ func validateDeclaredResources(ps *PipelineSpec) error { if len(missing) > 0 { return fmt.Errorf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing) } + return nil } @@ -189,6 +190,39 @@ func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { return err } + // Validate the pipeline's workspaces. + if err := validatePipelineWorkspaces(ps.Workspaces, ps.Tasks); err != nil { + return err + } + + return nil +} + +func validatePipelineWorkspaces(wss []WorkspacePipelineDeclaration, pts []PipelineTask) *apis.FieldError { + // Workspace names must be non-empty and unique. + wsTable := make(map[string]struct{}) + for i, ws := range wss { + if ws.Name == "" { + return apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i), "spec.workspaces") + } + if _, ok := wsTable[ws.Name]; ok { + return apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name), "spec.workspaces") + } + wsTable[ws.Name] = struct{}{} + } + + // Any workspaces used in PipelineTasks should have their name declared in the Pipeline's + // Workspaces list. 
+ for ptIdx, pt := range pts { + for wsIdx, ws := range pt.Workspaces { + if _, ok := wsTable[ws.Workspace]; !ok { + return apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), + fmt.Sprintf("spec.tasks[%d].workspaces[%d]", ptIdx, wsIdx), + ) + } + } + } return nil } diff --git a/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go b/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go index bd503e49178..0cef1fee166 100644 --- a/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go +++ b/pkg/apis/pipeline/v1alpha1/pipeline_validation_test.go @@ -297,6 +297,29 @@ func TestPipeline_Validate(t *testing.T) { tb.PipelineTask("bar", "bar", tb.RunAfter("foo")), )), failureExpected: true, + }, { + name: "unused pipeline spec workspaces do not cause an error", + p: tb.Pipeline("name", "namespace", tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineWorkspaceDeclaration("bar"), + tb.PipelineTask("foo", "foo"), + )), + failureExpected: false, + }, { + name: "workspace bindings relying on a non-existent pipeline workspace cause an error", + p: tb.Pipeline("name", "namespace", tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("taskname", "taskref", + tb.PipelineTaskWorkspaceBinding("taskWorkspaceName", "pipelineWorkspaceName")), + )), + failureExpected: true, + }, { + name: "multiple workspaces sharing the same name are not allowed", + p: tb.Pipeline("name", "namespace", tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineWorkspaceDeclaration("foo"), + )), + failureExpected: true, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go index 28288537333..787ba3cfdd1 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go @@ -60,9 +60,12 @@ 
type PipelineRunSpec struct { // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` - // PodTemplate holds pod specific configuration PodTemplate *PodTemplate `json:"podTemplate,omitempty"` + // Workspaces holds a set of workspace bindings that must match names + // with those declared in the pipeline. + // +optional + Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` } // PipelineRunSpecStatus defines the pipelinerun spec status the user can provide diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go index 3c4f412259d..249e0465449 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go @@ -65,5 +65,18 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { } } + if ps.Workspaces != nil { + wsNames := make(map[string]int) + for idx, ws := range ps.Workspaces { + if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists { + return &apis.FieldError{ + Message: fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), + Paths: []string{"spec.workspaces"}, + } + } + wsNames[ws.Name] = idx + } + } + return nil } diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_validation_test.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_validation_test.go index c311d07ea8a..29bccf68797 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_validation_test.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_validation_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -163,6 +164,24 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) { }}}, }, wantErr: 
apis.ErrDisallowedFields("spec.pipelinespec", "spec.pipelineref"), + }, { + name: "workspaces may only appear once", + spec: v1alpha1.PipelineRunSpec{ + PipelineRef: &v1alpha1.PipelineRef{ + Name: "pipelinerefname", + }, + Workspaces: []v1alpha1.WorkspaceBinding{{ + Name: "ws", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, { + Name: "ws", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + wantErr: &apis.FieldError{ + Message: `workspace "ws" provided by pipelinerun more than once, at index 0 and 1`, + Paths: []string{"spec.workspaces"}, + }, }} for _, ps := range tests { t.Run(ps.name, func(t *testing.T) { diff --git a/pkg/apis/pipeline/v1alpha1/workspace_types.go b/pkg/apis/pipeline/v1alpha1/workspace_types.go index 5c1a0242c66..09b68d4ab05 100644 --- a/pkg/apis/pipeline/v1alpha1/workspace_types.go +++ b/pkg/apis/pipeline/v1alpha1/workspace_types.go @@ -25,3 +25,11 @@ type WorkspaceDeclaration = v1alpha2.WorkspaceDeclaration // WorkspaceBinding maps a Task's declared workspace to a Volume. type WorkspaceBinding = v1alpha2.WorkspaceBinding + +// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +type WorkspacePipelineDeclaration = v1alpha2.WorkspacePipelineDeclaration + +// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be +// mapped to a task's declared workspace. 
+type WorkspacePipelineTaskBinding = v1alpha2.WorkspacePipelineTaskBinding diff --git a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 298c3dc2d4d..564eb8dc614 100644 --- a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -765,6 +765,13 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { *out = new(pod.Template) (*in).DeepCopyInto(*out) } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]v1alpha2.WorkspaceBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -909,6 +916,11 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]v1alpha2.WorkspacePipelineDeclaration, len(*in)) + copy(*out, *in) + } return } @@ -975,6 +987,11 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]v1alpha2.WorkspacePipelineTaskBinding, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/apis/pipeline/v1alpha2/workspace_types.go b/pkg/apis/pipeline/v1alpha2/workspace_types.go index 175649b414a..a5082b98ed5 100644 --- a/pkg/apis/pipeline/v1alpha2/workspace_types.go +++ b/pkg/apis/pipeline/v1alpha2/workspace_types.go @@ -71,3 +71,19 @@ type WorkspaceBinding struct { // +optional Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` } + +// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +type WorkspacePipelineDeclaration struct { + // Name is the name of a workspace to be provided by a PipelineRun. 
+ Name string `json:"name"` +} + +// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be +// mapped to a task's declared workspace. +type WorkspacePipelineTaskBinding struct { + // Name is the name of the workspace as declared by the task + Name string `json:"name"` + // Workspace is the name of the workspace declared by the pipeline + Workspace string `json:"workspace"` +} diff --git a/pkg/apis/pipeline/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha2/zz_generated.deepcopy.go index af1cdd8bbe7..a65f3910adc 100644 --- a/pkg/apis/pipeline/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha2/zz_generated.deepcopy.go @@ -771,3 +771,35 @@ func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspacePipelineDeclaration) DeepCopyInto(out *WorkspacePipelineDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineDeclaration. +func (in *WorkspacePipelineDeclaration) DeepCopy() *WorkspacePipelineDeclaration { + if in == nil { + return nil + } + out := new(WorkspacePipelineDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding. 
+func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding { + if in == nil { + return nil + } + out := new(WorkspacePipelineTaskBinding) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index 310245092ef..75da4ef0875 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -54,6 +54,9 @@ const ( // ReasonInvalidBindings indicates that the reason for the failure status is that the // PipelineResources bound in the PipelineRun didn't match those declared in the Pipeline ReasonInvalidBindings = "InvalidPipelineResourceBindings" + // ReasonInvalidWorkspaceBinding indicates that a Pipeline expects a workspace but a + // PipelineRun has provided an invalid binding. + ReasonInvalidWorkspaceBinding = "InvalidWorkspaceBindings" // ReasonParameterTypeMismatch indicates that the reason for the failure status is that // parameter(s) declared in the PipelineRun do not have the some declared type as the // parameters(s) declared in the Pipeline that they are supposed to override. @@ -327,6 +330,18 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er return nil } + // Ensure that the workspaces expected by the Pipeline are provided by the PipelineRun. 
+ if err := resources.ValidateWorkspaceBindings(pipelineSpec, pr); err != nil { + pr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: ReasonInvalidWorkspaceBinding, + Message: fmt.Sprintf("PipelineRun %s doesn't bind Pipeline %s's Workspaces correctly: %s", + fmt.Sprintf("%s/%s", pr.Namespace, pr.Name), fmt.Sprintf("%s/%s", pr.Namespace, pipelineMeta.Name), err), + }) + return nil + } + // Apply parameter substitution from the PipelineRun pipelineSpec = resources.ApplyParameters(pipelineSpec, pr) @@ -558,6 +573,21 @@ func (c *Reconciler) createTaskRun(rprt *resources.ResolvedPipelineRunTask, pr * tr.Spec.TaskSpec = rprt.ResolvedTaskResources.TaskSpec } + pipelineRunWorkspaces := make(map[string]v1alpha1.WorkspaceBinding) + for _, binding := range pr.Spec.Workspaces { + pipelineRunWorkspaces[binding.Name] = binding + } + for _, ws := range rprt.PipelineTask.Workspaces { + taskWorkspaceName, pipelineWorkspaceName := ws.Name, ws.Workspace + if b, hasBinding := pipelineRunWorkspaces[pipelineWorkspaceName]; hasBinding { + binding := *b.DeepCopy() + binding.Name = taskWorkspaceName + tr.Spec.Workspaces = append(tr.Spec.Workspaces, binding) + } else { + return nil, fmt.Errorf("expected workspace %q to be provided by pipelinerun for pipeline task %q", pipelineWorkspaceName, rprt.PipelineTask.Name) + } + } + resources.WrapSteps(&tr.Spec, rprt.PipelineTask, rprt.ResolvedTaskResources.Inputs, rprt.ResolvedTaskResources.Outputs, storageBasePath) c.Logger.Infof("Creating a new TaskRun object %s", rprt.TaskRunName) return c.PipelineClientSet.TektonV1alpha1().TaskRuns(pr.Namespace).Create(tr) diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 56571c370e3..128813c32a3 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -197,6 +197,21 
@@ func ValidateResourceBindings(p *v1alpha1.PipelineSpec, pr *v1alpha1.PipelineRun return nil } +// ValidateWorkspaceBindings validates that the Workspaces expected by a Pipeline are provided by a PipelineRun. +func ValidateWorkspaceBindings(p *v1alpha1.PipelineSpec, pr *v1alpha1.PipelineRun) error { + pipelineRunWorkspaces := make(map[string]v1alpha1.WorkspaceBinding) + for _, binding := range pr.Spec.Workspaces { + pipelineRunWorkspaces[binding.Name] = binding + } + + for _, ws := range p.Workspaces { + if _, ok := pipelineRunWorkspaces[ws.Name]; !ok { + return fmt.Errorf("pipeline expects workspace with name %q be provided by pipelinerun", ws.Name) + } + } + return nil +} + // TaskNotFoundError indicates that the resolution failed because a referenced Task couldn't be retrieved type TaskNotFoundError struct { Name string diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index 88e71bce4de..0cd08dc161f 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -1811,3 +1811,15 @@ func TestGetResourcesFromBindings_Extra(t *testing.T) { t.Fatalf("Expected error indicating `image-resource` was extra but got no error") } } + +func TestValidateWorkspaceBindings(t *testing.T) { + p := tb.Pipeline("pipelines", "namespace", tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + )) + pr := tb.PipelineRun("pipelinerun", "namespace", tb.PipelineRunSpec("pipeline", + tb.PipelineRunWorkspaceBindingEmptyDir("bar"), + )) + if err := ValidateWorkspaceBindings(&p.Spec, pr); err == nil { + t.Fatalf("Expected error indicating `foo` workspace was not provided but got no error") + } +} diff --git a/test/builder/pipeline.go b/test/builder/pipeline.go index 164ae5e1e3a..3d49afd8d65 100644 --- a/test/builder/pipeline.go +++ b/test/builder/pipeline.go @@ -267,6 +267,15 @@ func 
PipelineTaskConditionResource(name, resource string, from ...string) Pipeli } } +func PipelineTaskWorkspaceBinding(name, workspace string) PipelineTaskOp { + return func(pt *v1alpha1.PipelineTask) { + pt.Workspaces = append(pt.Workspaces, v1alpha1.WorkspacePipelineTaskBinding{ + Name: name, + Workspace: workspace, + }) + } +} + // PipelineRun creates a PipelineRun with default values. // Any number of PipelineRun modifier can be passed to transform it. func PipelineRun(name, namespace string, ops ...PipelineRunOp) *v1alpha1.PipelineRun { @@ -528,3 +537,22 @@ func PipelineResourceSpecSecretParam(fieldname, secretName, secretKey string) Pi }) } } + +// PipelineWorkspaceDeclaration adds a Workspace to the workspaces listed in the pipeline spec. +func PipelineWorkspaceDeclaration(names ...string) PipelineSpecOp { + return func(spec *v1alpha1.PipelineSpec) { + for _, name := range names { + spec.Workspaces = append(spec.Workspaces, v1alpha1.WorkspacePipelineDeclaration{Name: name}) + } + } +} + +// PipelineRunWorkspaceBindingEmptyDir adds an EmptyDir Workspace to the workspaces of a pipelinerun spec. 
+func PipelineRunWorkspaceBindingEmptyDir(name string) PipelineRunSpecOp { + return func(spec *v1alpha1.PipelineRunSpec) { + spec.Workspaces = append(spec.Workspaces, v1alpha1.WorkspaceBinding{ + Name: name, + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }) + } +} diff --git a/test/builder/pipeline_test.go b/test/builder/pipeline_test.go index f4e6f293524..63dfbc0797d 100644 --- a/test/builder/pipeline_test.go +++ b/test/builder/pipeline_test.go @@ -44,6 +44,7 @@ func TestPipeline(t *testing.T) { tb.PipelineTaskConditionParam("param-name", "param-value"), tb.PipelineTaskConditionResource("some-resource", "my-only-git-resource", "bar", "never-gonna"), ), + tb.PipelineTaskWorkspaceBinding("task-workspace1", "workspace1"), ), tb.PipelineTask("bar", "chocolate", tb.PipelineTaskRefKind(v1alpha1.ClusterTaskKind), @@ -59,6 +60,7 @@ func TestPipeline(t *testing.T) { Image: "myimage", }}}}, )), + tb.PipelineWorkspaceDeclaration("workspace1"), ), tb.PipelineCreationTimestamp(creationTime), ) @@ -106,6 +108,10 @@ func TestPipeline(t *testing.T) { From: []string{"bar", "never-gonna"}, }}, }}, + Workspaces: []v1alpha1.WorkspacePipelineTaskBinding{{ + Name: "task-workspace1", + Workspace: "workspace1", + }}, }, { Name: "bar", TaskRef: &v1alpha1.TaskRef{Name: "chocolate", Kind: v1alpha1.ClusterTaskKind}, @@ -134,6 +140,9 @@ func TestPipeline(t *testing.T) { }, }, }}, + Workspaces: []v1alpha1.WorkspacePipelineDeclaration{{ + Name: "workspace1", + }}, }, } if d := cmp.Diff(expectedPipeline, pipeline); d != "" { diff --git a/test/workspace_test.go b/test/workspace_test.go index beb3b27dbea..01f5b2f2776 100644 --- a/test/workspace_test.go +++ b/test/workspace_test.go @@ -21,10 +21,14 @@ package test import ( "strings" "testing" + "time" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck/v1beta1" 
knativetest "knative.dev/pkg/test" ) @@ -83,3 +87,100 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { } } } + +func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + // These are the duplicated workspace entries that are being tested. 
+ tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + ), + ) + _, err := c.PipelineRunClient.Create(pipelineRun) + + if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { + t.Fatalf("Expected error when creating pipelinerun with duplicate workspace entries but received: %v", err) + } +} + +func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + ), + ) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun: %s", err) + } + + conditions := make(v1beta1.Conditions, 0) + + if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, func(pr *v1alpha1.PipelineRun) (bool, error) { + if len(pr.Status.Conditions) > 0 { + conditions = pr.Status.Conditions + return true, nil + } + return false, nil + }, "PipelineRunHasCondition"); err != nil { + t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) + } + + for _, 
condition := range conditions { + if condition.Type == apis.ConditionSucceeded && condition.Status == corev1.ConditionFalse && strings.Contains(condition.Message, `pipeline expects workspace with name "foo" be provided by pipelinerun`) { + return + } + } + + t.Fatalf("Expected failure condition after creating pipelinerun with missing workspace but did not receive one.") +}