diff --git a/pkg/apis/pipeline/v1/container_types.go b/pkg/apis/pipeline/v1/container_types.go new file mode 100644 index 00000000000..7fa38999ebc --- /dev/null +++ b/pkg/apis/pipeline/v1/container_types.go @@ -0,0 +1,526 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Step runs a subcomponent of a Task +type Step struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. 
+ // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have a Command, and the Args will be passed to the Script. + // +optional + Script string `json:"script,omitempty"` + + // Timeout is the time after which the step times out. Defaults to never. + // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Step wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` + + // OnError defines the exit behavior of a container on error. + // It can be set to [ continue | stopAndFail ]. + // stopAndFail indicates that the taskRun should fail if the container exits with a non-zero exit code. + // continue indicates that the remaining steps should keep executing irrespective of the container exit code. + OnError string `json:"onError,omitempty"` +} + +// ToK8sContainer converts the Step to a Kubernetes Container struct +func (s *Step) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *Step) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// StepTemplate is a template for a Step +type StepTemplate struct { + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory.
+ // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *StepTemplate) SetContainerFields(c corev1.Container) { + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct +func (s *StepTemplate) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// Sidecar has nearly the same data structure as Step but does not have the ability to timeout. +type Sidecar struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. 
+ // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. 
If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container process that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Sidecar cannot have a Command or Args. + // +optional + Script string `json:"script,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Sidecar wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` +} + +// ToK8sContainer converts the Sidecar to a Kubernetes Container struct +func (s *Sidecar) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.Ports, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.LivenessProbe, + ReadinessProbe: s.ReadinessProbe, + StartupProbe: s.StartupProbe, + Lifecycle: s.Lifecycle, + TerminationMessagePath: s.TerminationMessagePath, + TerminationMessagePolicy: s.TerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.Stdin, + StdinOnce: s.StdinOnce, + TTY: s.TTY, + } +} + +// SetContainerFields sets the fields of the Sidecar to the values of the corresponding fields in the Container +func (s *Sidecar) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.Ports = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.LivenessProbe = c.LivenessProbe + s.ReadinessProbe = c.ReadinessProbe + s.StartupProbe = c.StartupProbe + s.Lifecycle = c.Lifecycle + s.TerminationMessagePath = c.TerminationMessagePath + s.TerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext + s.Stdin = c.Stdin + s.StdinOnce = c.StdinOnce + s.TTY = c.TTY +} diff --git a/pkg/apis/pipeline/v1/merge.go b/pkg/apis/pipeline/v1/merge.go new file mode 100644 index 00000000000..6075ac04980 --- /dev/null +++ b/pkg/apis/pipeline/v1/merge.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +// mergeData is used to store the intermediate data needed to merge an object +// with a template. It's provided to avoid repeatedly re-serializing the template. +// +k8s:openapi-gen=false +type mergeData struct { + emptyJSON []byte + templateJSON []byte + patchSchema strategicpatch.PatchMetaFromStruct +} + +// MergeStepsWithStepTemplate takes a possibly nil container template and a +// list of steps, merging each of the steps with the container template, if +// it's not nil, and returning the resulting list. +func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) { + if template == nil { + return steps, nil + } + + md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{}) + if err != nil { + return nil, err + } + + for i, s := range steps { + merged := corev1.Container{} + err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged) + if err != nil { + return nil, err + } + + // If the merged container's args came back nil but the step originally specified args, + // reset them to an empty slice instead of nil. + if merged.Args == nil && s.Args != nil { + merged.Args = []string{} + } + + // Pass through the original step's fields that have no corev1.Container equivalent, + // such as Script, so they survive the merge and any later conversion. + newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, Workspaces: s.Workspaces} + newStep.SetContainerFields(merged) + steps[i] = newStep + } + return steps, nil +} + +// getMergeData serializes the template and empty object to get the intermediate results necessary for +// merging an object of the same type with this template. +// This function is provided to avoid repeatedly serializing an identical template. +func getMergeData(template, empty interface{}) (*mergeData, error) { + // We need JSON bytes to generate a patch to merge the object + // onto the template, so marshal the template. + templateJSON, err := json.Marshal(template) + if err != nil { + return nil, err + } + // We need to do a three-way merge to actually merge the template and + // object, so we need an empty object as the "original" + emptyJSON, err := json.Marshal(empty) + if err != nil { + return nil, err + } + // Get the patch meta, which is needed for generating and applying the merge patch. + patchSchema, err := strategicpatch.NewPatchMetaFromStruct(template) + if err != nil { + return nil, err + } + return &mergeData{templateJSON: templateJSON, emptyJSON: emptyJSON, patchSchema: patchSchema}, nil +} + +// mergeObjWithTemplateBytes merges obj with md's template JSON and updates out to reflect the merged result. +// out is a pointer to the zero value of obj's type. +// This function is provided to avoid repeatedly serializing an identical template.
+func mergeObjWithTemplateBytes(md *mergeData, obj, out interface{}) error { + // Marshal the object to JSON + objAsJSON, err := json.Marshal(obj) + if err != nil { + return err + } + // Create a merge patch, with the empty JSON as the original, the object JSON as the modified, and the template + // JSON as the current - this lets us do a deep merge of the template and object, with awareness of + // the "patchMerge" tags. + patch, err := strategicpatch.CreateThreeWayMergePatch(md.emptyJSON, objAsJSON, md.templateJSON, md.patchSchema, true) + if err != nil { + return err + } + + // Actually apply the merge patch to the template JSON. + mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(md.templateJSON, patch, md.patchSchema) + if err != nil { + return err + } + // Unmarshal the merged JSON to a pointer, and return it. + return json.Unmarshal(mergedAsJSON, out) +} diff --git a/pkg/apis/pipeline/v1/merge_test.go b/pkg/apis/pipeline/v1/merge_test.go new file mode 100644 index 00000000000..8fa836f7bab --- /dev/null +++ b/pkg/apis/pipeline/v1/merge_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestMergeStepsWithStepTemplate(t *testing.T) { + resourceQuantityCmp := cmp.Comparer(func(x, y resource.Quantity) bool { + return x.Cmp(y) == 0 + }) + + for _, tc := range []struct { + name string + template *v1.StepTemplate + steps []v1.Step + expected []v1.Step + }{{ + name: "nil-template", + template: nil, + steps: []v1.Step{{ + Image: "some-image", + OnError: "foo", + }}, + expected: []v1.Step{{ + Image: "some-image", + OnError: "foo", + }}, + }, { + name: "not-overlapping", + template: &v1.StepTemplate{ + Command: []string{"/somecmd"}, + }, + steps: []v1.Step{{ + Image: "some-image", + OnError: "foo", + }}, + expected: []v1.Step{{ + Command: []string{"/somecmd"}, Image: "some-image", + OnError: "foo", + }}, + }, { + name: "overwriting-one-field", + template: &v1.StepTemplate{ + Image: "some-image", + Command: []string{"/somecmd"}, + }, + steps: []v1.Step{{ + Image: "some-other-image", + }}, + expected: []v1.Step{{ + Command: []string{"/somecmd"}, + Image: "some-other-image", + }}, + }, { + name: "merge-and-overwrite-slice", + template: &v1.StepTemplate{ + Env: []corev1.EnvVar{{ + Name: "KEEP_THIS", + Value: "A_VALUE", + }, { + Name: "SOME_KEY", + Value: "ORIGINAL_VALUE", + }}, + }, + steps: []v1.Step{{ + Env: []corev1.EnvVar{{ + Name: "NEW_KEY", + Value: "A_VALUE", + }, { + Name: "SOME_KEY", + Value: "NEW_VALUE", + }}, + }}, + expected: []v1.Step{{ + Env: []corev1.EnvVar{{ + Name: "NEW_KEY", + Value: "A_VALUE", + }, { + Name: "KEEP_THIS", + Value: "A_VALUE", + }, { + Name: "SOME_KEY", + Value: "NEW_VALUE", + }}, + }}, + }} { + t.Run(tc.name, func(t *testing.T) { + result, err 
:= v1.MergeStepsWithStepTemplate(tc.template, tc.steps) + if err != nil { + t.Errorf("expected no error. Got error %v", err) + } + + if d := cmp.Diff(tc.expected, result, resourceQuantityCmp); d != "" { + t.Errorf("merged steps don't match, diff: %s", diff.PrintWantGot(d)) + } + }) + } +} diff --git a/pkg/apis/pipeline/v1/openapi_generated.go b/pkg/apis/pipeline/v1/openapi_generated.go index 77502115e2e..d389b93a804 100644 --- a/pkg/apis/pipeline/v1/openapi_generated.go +++ b/pkg/apis/pipeline/v1/openapi_generated.go @@ -30,11 +30,27 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task": schema_pkg_apis_pipeline_v1_Task(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList": schema_pkg_apis_pipeline_v1_TaskList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec": schema_pkg_apis_pipeline_v1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString": schema_pkg_apis_pipeline_v1_ArrayOrString(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param": schema_pkg_apis_pipeline_v1_Param(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec": schema_pkg_apis_pipeline_v1_ParamSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec": schema_pkg_apis_pipeline_v1_PropertySpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam": schema_pkg_apis_pipeline_v1_ResolverParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef": schema_pkg_apis_pipeline_v1_ResolverRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar": schema_pkg_apis_pipeline_v1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step": schema_pkg_apis_pipeline_v1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate": schema_pkg_apis_pipeline_v1_StepTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task": schema_pkg_apis_pipeline_v1_Task(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList": schema_pkg_apis_pipeline_v1_TaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult": schema_pkg_apis_pipeline_v1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult": schema_pkg_apis_pipeline_v1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec": schema_pkg_apis_pipeline_v1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding": schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding": 
schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage": schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref), } } @@ -283,112 +299,1459 @@ func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common. } } -func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_ArrayOrString(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Description: "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. consideration the object case after the community reaches an agreement on it.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "stringVal": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the stored type of ArrayOrString.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "arrayVal": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "objectVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Param declares an ArrayOrString to use for the parameter called name.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func schema_pkg_apis_pipeline_v1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name declares the name by which a parameter is referenced.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "metadata": { + "type": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + Type: []string{"string"}, + Format: "", }, }, - "spec": { + "description": { SchemaProps: spec.SchemaProps{ - Description: "Spec holds the desired state of the Task from the client", - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + Description: "Description is a user-facing description of the parameter that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs parameter.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), }, }, }, + Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, } } -func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskList contains a list of Task", + Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Description: "Name is the name of a workspace to be provided by a PipelineRun.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "description": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", Type: []string{"string"}, Format: "", }, }, - "metadata": { + "optional": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Description: "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", }, }, - "items": { + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PropertySpec defines the struct for object keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverParam is a single parameter passed to a resolver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the string value of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resolver": { + SchemaProps: spec.SchemaProps{ + Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resource contains the parameters used to identify the referenced Tekton resource. 
Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"), }, }, }, }, }, }, - Required: []string{"items"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"}, } } -func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskSpec defines the desired state of Task.", + Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "description": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "Description is a user-facing description of the task that may be used to populate a UI.", + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Type: []string{"string"}, + Format: "", + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Step runs a subcomponent of a Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + "onError": { + SchemaProps: spec.SchemaProps{ + Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate is a template for a Step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds the desired state of the Task from the client", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskList contains a list of Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human-readable description of the result", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value the given value of the result", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskSpec defines the desired state of Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a user-facing description of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "steps": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step"), + }, + }, + }, + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes is a collection of volumes that are available to mount into the steps of the build.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "stepTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate"), + }, + }, + "sidecars": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Sidecars are run alongside the Task's step containers. 
They begin before the steps start and end after the steps complete.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar"), + }, + }, + }, + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Workspaces are the volumes that this Task requires.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration"), + }, + }, + }, + }, + }, + "results": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Results are values that this Task can output", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceBinding maps a Task's declared workspace to a Volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace populated by the volume.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "EmptyDir represents a temporary directory that shares a Task's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap represents a configMap that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret represents a secret that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name by which you can bind the volume at runtime.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is an optional human readable description of this volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath overrides the directory that the volume will be made available at.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace as declared by the task", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "workspace": { + SchemaProps: spec.SchemaProps{ + Description: "Workspace is the name of the workspace declared by the pipeline", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. 
the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace this Step or Sidecar wants access to.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + Default: "", Type: []string{"string"}, Format: "", }, }, }, + Required: []string{"name", "mountPath"}, }, }, } diff --git a/pkg/apis/pipeline/v1/param_types.go b/pkg/apis/pipeline/v1/param_types.go new file mode 100644 index 00000000000..c3fc2976286 --- /dev/null +++ b/pkg/apis/pipeline/v1/param_types.go @@ -0,0 +1,290 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strings" + + resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + "github.com/tektoncd/pipeline/pkg/substitution" +) + +// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else +// i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. +const exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` + +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) + +// ParamsPrefix is the prefix used in $(...) expressions referring to parameters +const ParamsPrefix = "params" + +// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as +// resources). Parameter values are provided by users as inputs on a TaskRun +// or PipelineRun. +type ParamSpec struct { + // Name declares the name by which a parameter is referenced. + Name string `json:"name"` + // Type is the user-specified type of the parameter. The possible types + // are currently "string", "array" and "object", and "string" is the default. + // +optional + Type ParamType `json:"type,omitempty"` + // Description is a user-facing description of the parameter that may be + // used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + // Properties is the JSON Schema properties to support key-value pairs parameter. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + // Default is the value a parameter takes if no input value is supplied. 
If + // default is set, a Task may be executed without a supplied value for the + // parameter. + // +optional + Default *ArrayOrString `json:"default,omitempty"` +} + +// PropertySpec defines the struct for object keys +type PropertySpec struct { + Type ParamType `json:"type,omitempty"` +} + +// SetDefaults set the default type +func (pp *ParamSpec) SetDefaults(context.Context) { + if pp == nil { + return + } + + // Propagate inferred type to the parent ParamSpec's type, and default type to the PropertySpec's type + // The sequence to look at is type in ParamSpec -> properties -> type in default -> array/string/object value in default + // If neither `properties` or `default` section is provided, ParamTypeString will be the default type. + switch { + case pp.Type != "": + // If param type is provided by the author, do nothing but just set default type for PropertySpec in case `properties` section is provided. + pp.setDefaultsForProperties() + case pp.Properties != nil: + pp.Type = ParamTypeObject + // Also set default type for PropertySpec + pp.setDefaultsForProperties() + case pp.Default == nil: + // ParamTypeString is the default value (when no type can be inferred from the default value) + pp.Type = ParamTypeString + case pp.Default.Type != "": + pp.Type = pp.Default.Type + case pp.Default.ArrayVal != nil: + pp.Type = ParamTypeArray + case pp.Default.ObjectVal != nil: + pp.Type = ParamTypeObject + default: + pp.Type = ParamTypeString + } +} + +// setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified +func (pp *ParamSpec) setDefaultsForProperties() { + for key, propertySpec := range pp.Properties { + if propertySpec.Type == "" { + pp.Properties[key] = PropertySpec{Type: ParamTypeString} + } + } +} + +// ResourceParam declares a string value to use for the parameter called Name, and is used in +// the specific context of PipelineResources. +type ResourceParam = resource.ResourceParam + +// Param declares an ArrayOrString to use for the parameter called name. +type Param struct { + Name string `json:"name"` + Value ArrayOrString `json:"value"` +} + +// ParamType indicates the type of an input parameter; +// Used to distinguish between a single string and an array of strings. +type ParamType string + +// Valid ParamTypes: +const ( + ParamTypeString ParamType = "string" + ParamTypeArray ParamType = "array" + ParamTypeObject ParamType = "object" +) + +// AllParamTypes can be used for ParamType validation. +var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject} + +// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: + +// ArrayOrString is a type that can hold a single string or string array. +// Used in JSON unmarshalling so that a single JSON field can accept +// either an individual string or an array of strings. +// TODO (@chuangw6): This struct will be renamed or be embedded in a new struct to take into +// consideration the object case after the community reaches an agreement on it. +type ArrayOrString struct { + Type ParamType `json:"type"` // Represents the stored type of ArrayOrString. + StringVal string `json:"stringVal"` + // +listType=atomic + ArrayVal []string `json:"arrayVal"` + ObjectVal map[string]string `json:"objectVal"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (arrayOrString *ArrayOrString) UnmarshalJSON(value []byte) error { + // ArrayOrString is used for Results Value as well, the results can be any kind of + // data so we need to check if it is empty. 
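+	// For example, "foo" and 123 both end up as string values, ["a","b"] as an array, and
+	// {"k":"v"} as an object; input that fails to unmarshal as []string or map[string]string
+	// falls back to being treated as a plain string.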
+ if len(value) == 0 { + arrayOrString.Type = ParamTypeString + return nil + } + if value[0] == '[' { + // We're trying to Unmarshal to []string, but for cases like []int or other types + // of nested array which we don't support yet, we should continue and Unmarshal + // it to String. If the Type being set doesn't match what it actually should be, + // it will be captured by validation in reconciler. + // if failed to unmarshal to array, we will convert the value to string and marshal it to string + var a []string + if err := json.Unmarshal(value, &a); err == nil { + arrayOrString.Type = ParamTypeArray + arrayOrString.ArrayVal = a + return nil + } + } + if value[0] == '{' { + // if failed to unmarshal to map, we will convert the value to string and marshal it to string + var m map[string]string + if err := json.Unmarshal(value, &m); err == nil { + arrayOrString.Type = ParamTypeObject + arrayOrString.ObjectVal = m + return nil + } + } + + // By default we unmarshal to string + arrayOrString.Type = ParamTypeString + if err := json.Unmarshal(value, &arrayOrString.StringVal); err == nil { + return nil + } + arrayOrString.StringVal = string(value) + + return nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { + switch arrayOrString.Type { + case ParamTypeString: + return json.Marshal(arrayOrString.StringVal) + case ParamTypeArray: + return json.Marshal(arrayOrString.ArrayVal) + case ParamTypeObject: + return json.Marshal(arrayOrString.ObjectVal) + default: + return []byte{}, fmt.Errorf("impossible ArrayOrString.Type: %q", arrayOrString.Type) + } +} + +// ApplyReplacements applyes replacements for ArrayOrString type +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + switch arrayOrString.Type { + case ParamTypeArray: + var newArrayVal []string + for _, v := range arrayOrString.ArrayVal { + newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) + } + arrayOrString.ArrayVal = newArrayVal + case ParamTypeObject: + newObjectVal := map[string]string{} + for k, v := range arrayOrString.ObjectVal { + newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) + } + arrayOrString.ObjectVal = newObjectVal + default: + arrayOrString.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) + } +} + +// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result. +// If the value of arrayOrString is a reference to array or object, the type will be corrected from string to array/object. +func (arrayOrString *ArrayOrString) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + stringVal := arrayOrString.StringVal + + // if the stringVal is a string literal or a string that mixed with var references + // just do the normal string replacement + if !exactVariableSubstitutionRegex.MatchString(stringVal) { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + return + } + + // trim the head "$(" and the tail ")" or "[*])" + // i.e. 
get "params.name" from "$(params.name)" or "$(params.name[*])" + trimedStringVal := StripStarVarSubExpression(stringVal) + + // if the stringVal is a reference to a string param + if _, ok := stringReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + } + + // if the stringVal is a reference to an array param, we need to change the type other than apply replacement + if _, ok := arrayReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements) + arrayOrString.Type = ParamTypeArray + } + + // if the stringVal is a reference an object param, we need to change the type other than apply replacement + if _, ok := objectReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ObjectVal = objectReplacements[trimedStringVal] + arrayOrString.Type = ParamTypeObject + } +} + +// StripStarVarSubExpression strips "$(target[*])"" to get "target" +func StripStarVarSubExpression(s string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") +} + +// NewArrayOrString creates an ArrayOrString of type ParamTypeString or ParamTypeArray, based on +// how many inputs are given (>1 input will create an array, not string). +func NewArrayOrString(value string, values ...string) *ArrayOrString { + if len(values) > 0 { + return &ArrayOrString{ + Type: ParamTypeArray, + ArrayVal: append([]string{value}, values...), + } + } + return &ArrayOrString{ + Type: ParamTypeString, + StringVal: value, + } +} + +// NewObject creates an ArrayOrString of type ParamTypeObject using the provided key-value pairs +func NewObject(pairs map[string]string) *ArrayOrString { + return &ArrayOrString{ + Type: ParamTypeObject, + ObjectVal: pairs, + } +} + +// ArrayReference returns the name of the parameter from array parameter reference +// returns arrayParam from $(params.arrayParam[*]) +func ArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])") +} diff --git a/pkg/apis/pipeline/v1/param_types_test.go b/pkg/apis/pipeline/v1/param_types_test.go new file mode 100644 index 00000000000..dbd686c2482 --- /dev/null +++ b/pkg/apis/pipeline/v1/param_types_test.go @@ -0,0 +1,441 @@ +/* +Copyright 2022 The Tekton Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1_test + +import ( + "bytes" + "context" + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/test/diff" +) + +func TestParamSpec_SetDefaults(t *testing.T) { + tests := []struct { + name string + before *v1.ParamSpec + defaultsApplied *v1.ParamSpec + }{{ + name: "inferred string type", + before: &v1.ParamSpec{ + Name: "parametername", + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeString, + }, + }, { + name: "inferred type from default value - array", + before: &v1.ParamSpec{ + Name: "parametername", + Default: &v1.ArrayOrString{ + ArrayVal: []string{"array"}, + }, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeArray, + Default: &v1.ArrayOrString{ + ArrayVal: []string{"array"}, + }, + }, + }, { + name: "inferred type from default value - string", + before: &v1.ParamSpec{ + Name: "parametername", + Default: &v1.ArrayOrString{ + StringVal: "an", + }, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeString, + Default: &v1.ArrayOrString{ + StringVal: "an", + }, + }, + }, { + name: "inferred type from default value - object", + before: &v1.ParamSpec{ + Name: "parametername", + Default: &v1.ArrayOrString{ + ObjectVal: map[string]string{"url": "test", "path": "test"}, + }, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeObject, + Default: &v1.ArrayOrString{ + ObjectVal: map[string]string{"url": "test", "path": "test"}, + }, + }, + }, { + name: "inferred type from properties - PropertySpec type is not provided", + before: &v1.ParamSpec{ + Name: "parametername", + Properties: map[string]v1.PropertySpec{"key1": {}}, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{"key1": {Type: "string"}}, + }, + }, { + name: "inferred type from properties - PropertySpec type is provided", + before: &v1.ParamSpec{ + Name: "parametername", + Properties: map[string]v1.PropertySpec{"key2": {Type: "string"}}, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{"key2": {Type: "string"}}, + }, + }, { + name: "fully defined ParamSpec - array", + before: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeArray, + Description: "a description", + Default: &v1.ArrayOrString{ + ArrayVal: []string{"array"}, + }, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeArray, + Description: "a description", + Default: &v1.ArrayOrString{ + ArrayVal: []string{"array"}, + }, + }, + }, { + name: "fully defined ParamSpec - object", + before: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeObject, + Description: "a description", + Default: &v1.ArrayOrString{ + ObjectVal: map[string]string{"url": "test", "path": "test"}, + }, + }, + defaultsApplied: &v1.ParamSpec{ + Name: "parametername", + Type: v1.ParamTypeObject, + Description: "a description", + Default: &v1.ArrayOrString{ + ObjectVal: map[string]string{"url": "test", "path": "test"}, + }, + }, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + tc.before.SetDefaults(ctx) + if d := cmp.Diff(tc.before, tc.defaultsApplied); d != "" { + t.Error(diff.PrintWantGot(d)) + } + }) + } +} + +func TestArrayOrString_ApplyReplacements(t *testing.T) { + type args 
struct { + input *v1.ArrayOrString + stringReplacements map[string]string + arrayReplacements map[string][]string + objectReplacements map[string]map[string]string + } + tests := []struct { + name string + args args + expectedOutput *v1.ArrayOrString + }{{ + name: "no replacements on array", + args: args{ + input: v1.NewArrayOrString("an", "array"), + stringReplacements: map[string]string{"some": "value", "anotherkey": "value"}, + arrayReplacements: map[string][]string{"arraykey": {"array", "value"}, "sdfdf": {"sdf", "sdfsd"}}, + }, + expectedOutput: v1.NewArrayOrString("an", "array"), + }, { + name: "single string replacement on string", + args: args{ + input: v1.NewArrayOrString("$(params.myString1)"), + stringReplacements: map[string]string{"params.myString1": "value1", "params.myString2": "value2"}, + arrayReplacements: map[string][]string{"arraykey": {"array", "value"}, "sdfdf": {"asdf", "sdfsd"}}, + }, + expectedOutput: v1.NewArrayOrString("value1"), + }, { + name: "multiple string replacements on string", + args: args{ + input: v1.NewArrayOrString("astring$(some) asdf $(anotherkey)"), + stringReplacements: map[string]string{"some": "value", "anotherkey": "value"}, + arrayReplacements: map[string][]string{"arraykey": {"array", "value"}, "sdfdf": {"asdf", "sdfsd"}}, + }, + expectedOutput: v1.NewArrayOrString("astringvalue asdf value"), + }, { + name: "single array replacement", + args: args{ + input: v1.NewArrayOrString("firstvalue", "$(arraykey)", "lastvalue"), + stringReplacements: map[string]string{"some": "value", "anotherkey": "value"}, + arrayReplacements: map[string][]string{"arraykey": {"array", "value"}, "sdfdf": {"asdf", "sdfsd"}}, + }, + expectedOutput: v1.NewArrayOrString("firstvalue", "array", "value", "lastvalue"), + }, { + name: "multiple array replacement", + args: args{ + input: v1.NewArrayOrString("firstvalue", "$(arraykey)", "lastvalue", "$(sdfdf)"), + stringReplacements: map[string]string{"some": "value", "anotherkey": "value"}, + arrayReplacements: map[string][]string{"arraykey": {"array", "value"}, "sdfdf": {"asdf", "sdfsd"}}, + }, + expectedOutput: v1.NewArrayOrString("firstvalue", "array", "value", "lastvalue", "asdf", "sdfsd"), + }, { + name: "empty array replacement", + args: args{ + input: v1.NewArrayOrString("firstvalue", "$(arraykey)", "lastvalue"), + stringReplacements: map[string]string{"some": "value", "anotherkey": "value"}, + arrayReplacements: map[string][]string{"arraykey": {}}, + }, + expectedOutput: v1.NewArrayOrString("firstvalue", "lastvalue"), + }, { + name: "array replacement on string val", + args: args{ + input: v1.NewArrayOrString("$(params.myarray)"), + arrayReplacements: map[string][]string{"params.myarray": {"a", "b", "c"}}, + }, + expectedOutput: v1.NewArrayOrString("a", "b", "c"), + }, { + name: "array star replacement on string val", + args: args{ + input: v1.NewArrayOrString("$(params.myarray[*])"), + arrayReplacements: map[string][]string{"params.myarray": {"a", "b", "c"}}, + }, + expectedOutput: v1.NewArrayOrString("a", "b", "c"), + }, { + name: "object replacement on string val", + args: args{ + input: v1.NewArrayOrString("$(params.object)"), + objectReplacements: map[string]map[string]string{ + "params.object": { + "url": "abc.com", + "commit": "af234", + }, + }, + }, + expectedOutput: v1.NewObject(map[string]string{ + "url": "abc.com", + "commit": "af234", + }), + }, { + name: "object star replacement on string val", + args: args{ + input: v1.NewArrayOrString("$(params.object[*])"), + objectReplacements: 
map[string]map[string]string{ + "params.object": { + "url": "abc.com", + "commit": "af234", + }, + }, + }, + expectedOutput: v1.NewObject(map[string]string{ + "url": "abc.com", + "commit": "af234", + }), + }, { + name: "string replacement on object individual variables", + args: args{ + input: v1.NewObject(map[string]string{ + "key1": "$(mystring)", + "key2": "$(anotherObject.key)", + }), + stringReplacements: map[string]string{ + "mystring": "foo", + "anotherObject.key": "bar", + }, + }, + expectedOutput: v1.NewObject(map[string]string{ + "key1": "foo", + "key2": "bar", + }), + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.args.input.ApplyReplacements(tt.args.stringReplacements, tt.args.arrayReplacements, tt.args.objectReplacements) + if d := cmp.Diff(tt.expectedOutput, tt.args.input); d != "" { + t.Errorf("ApplyReplacements() output did not match expected value %s", diff.PrintWantGot(d)) + } + }) + } +} + +type ArrayOrStringHolder struct { + AOrS v1.ArrayOrString `json:"val"` +} + +func TestArrayOrString_UnmarshalJSON(t *testing.T) { + cases := []struct { + input map[string]interface{} + result v1.ArrayOrString + }{ + { + input: map[string]interface{}{"val": 123}, + result: *v1.NewArrayOrString("123"), + }, + { + input: map[string]interface{}{"val": "123"}, + result: *v1.NewArrayOrString("123"), + }, + { + input: map[string]interface{}{"val": ""}, + result: *v1.NewArrayOrString(""), + }, + { + input: map[string]interface{}{"val": nil}, + result: v1.ArrayOrString{Type: v1.ParamTypeString, ArrayVal: nil}, + }, + { + input: map[string]interface{}{"val": []string{}}, + result: v1.ArrayOrString{Type: v1.ParamTypeArray, ArrayVal: []string{}}, + }, + { + input: map[string]interface{}{"val": []string{"oneelement"}}, + result: v1.ArrayOrString{Type: v1.ParamTypeArray, ArrayVal: []string{"oneelement"}}, + }, + { + input: map[string]interface{}{"val": []string{"multiple", "elements"}}, + result: v1.ArrayOrString{Type: v1.ParamTypeArray, ArrayVal: []string{"multiple", "elements"}}, + }, + { + input: map[string]interface{}{"val": map[string]string{"key1": "val1", "key2": "val2"}}, + result: v1.ArrayOrString{Type: v1.ParamTypeObject, ObjectVal: map[string]string{"key1": "val1", "key2": "val2"}}, + }, + } + + for _, c := range cases { + for _, opts := range []func(enc *json.Encoder){ + // Default encoding + func(enc *json.Encoder) {}, + // Multiline encoding + func(enc *json.Encoder) { enc.SetIndent("", " ") }, + } { + b := new(bytes.Buffer) + enc := json.NewEncoder(b) + opts(enc) + if err := enc.Encode(c.input); err != nil { + t.Fatalf("error encoding json: %v", err) + } + + var result ArrayOrStringHolder + if err := json.Unmarshal(b.Bytes(), &result); err != nil { + t.Errorf("Failed to unmarshal input '%v': %v", c.input, err) + } + if !reflect.DeepEqual(result.AOrS, c.result) { + t.Errorf("expected %+v, got %+v", c.result, result) + } + } + } +} + +func TestArrayOrString_UnmarshalJSON_Directly(t *testing.T) { + cases := []struct { + desc string + input string + expected v1.ArrayOrString + }{ + {desc: "empty value", input: ``, expected: *v1.NewArrayOrString("")}, + {desc: "int value", input: `1`, expected: *v1.NewArrayOrString("1")}, + {desc: "int array", input: `[1,2,3]`, expected: *v1.NewArrayOrString("[1,2,3]")}, + {desc: "nested array", input: `[1,\"2\",3]`, expected: *v1.NewArrayOrString(`[1,\"2\",3]`)}, + {desc: "string value", input: `hello`, expected: *v1.NewArrayOrString("hello")}, + {desc: "array value", input: `["hello","world"]`, expected: 
*v1.NewArrayOrString("hello", "world")}, + {desc: "object value", input: `{"hello":"world"}`, expected: *v1.NewObject(map[string]string{"hello": "world"})}, + } + + for _, c := range cases { + aos := v1.ArrayOrString{} + if err := aos.UnmarshalJSON([]byte(c.input)); err != nil { + t.Errorf("Failed to unmarshal input '%v': %v", c.input, err) + } + if !reflect.DeepEqual(aos, c.expected) { + t.Errorf("Failed to unmarshal input '%v': expected %+v, got %+v", c.input, c.expected, aos) + } + } +} + +func TestArrayOrString_UnmarshalJSON_Error(t *testing.T) { + cases := []struct { + desc string + input string + }{ + {desc: "empty value", input: "{\"val\": }"}, + {desc: "wrong beginning value", input: "{\"val\": @}"}, + } + + for _, c := range cases { + var result ArrayOrStringHolder + if err := json.Unmarshal([]byte(c.input), &result); err == nil { + t.Errorf("Should return err but got nil '%v'", c.input) + } + } +} + +func TestArrayOrString_MarshalJSON(t *testing.T) { + cases := []struct { + input v1.ArrayOrString + result string + }{ + {*v1.NewArrayOrString("123"), "{\"val\":\"123\"}"}, + {*v1.NewArrayOrString("123", "1234"), "{\"val\":[\"123\",\"1234\"]}"}, + {*v1.NewArrayOrString("a", "a", "a"), "{\"val\":[\"a\",\"a\",\"a\"]}"}, + {*v1.NewObject(map[string]string{"key1": "var1", "key2": "var2"}), "{\"val\":{\"key1\":\"var1\",\"key2\":\"var2\"}}"}, + } + + for _, c := range cases { + input := ArrayOrStringHolder{c.input} + result, err := json.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input '%v': %v", input, err) + } + if string(result) != c.result { + t.Errorf("Failed to marshal input '%v': expected: %+v, got %q", input, c.result, string(result)) + } + } +} + +func TestArrayReference(t *testing.T) { + tests := []struct { + name, p, expectedResult string + }{{ + name: "valid array parameter expression with star notation returns param name", + p: "$(params.arrayParam[*])", + expectedResult: "arrayParam", + }, { + name: "invalid array parameter without dollar notation returns the input as is", + p: "params.arrayParam[*]", + expectedResult: "params.arrayParam[*]", + }} + for _, tt := range tests { + if d := cmp.Diff(tt.expectedResult, v1.ArrayReference(tt.p)); d != "" { + t.Errorf(diff.PrintWantGot(d)) + } + } +} diff --git a/pkg/apis/pipeline/v1/resolver_types.go b/pkg/apis/pipeline/v1/resolver_types.go new file mode 100644 index 00000000000..bb547b2a0f0 --- /dev/null +++ b/pkg/apis/pipeline/v1/resolver_types.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// ResolverName is the name of a resolver from which a resource can be +// requested. +type ResolverName string + +// ResolverRef can be used to refer to a Pipeline or Task in a remote +// location like a git repo. This feature is in alpha and these fields +// are only available when the alpha feature gate is enabled. +type ResolverRef struct { + // Resolver is the name of the resolver that should perform + // resolution of the referenced Tekton resource, such as "git". 
+ // +optional + Resolver ResolverName `json:"resolver,omitempty"` + // Resource contains the parameters used to identify the + // referenced Tekton resource. Example entries might include + // "repo" or "path" but the set of params ultimately depends on + // the chosen resolver. + // +optional + // +listType=atomic + Resource []ResolverParam `json:"resource,omitempty"` +} + +// ResolverParam is a single parameter passed to a resolver. +type ResolverParam struct { + // Name is the name of the parameter that will be passed to the + // resolver. + Name string `json:"name"` + // Value is the string value of the parameter that will be + // passed to the resolver. + Value string `json:"value"` +} diff --git a/pkg/apis/pipeline/v1/task_conversion.go b/pkg/apis/pipeline/v1/result_defaults.go similarity index 51% rename from pkg/apis/pipeline/v1/task_conversion.go rename to pkg/apis/pipeline/v1/result_defaults.go index 2f41acfaabe..9a5020ba12b 100644 --- a/pkg/apis/pipeline/v1/task_conversion.go +++ b/pkg/apis/pipeline/v1/result_defaults.go @@ -1,12 +1,9 @@ /* -Copyright 2020 The Tekton Authors - +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,21 +13,12 @@ limitations under the License. package v1 -import ( - "context" - "fmt" - - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*Task)(nil) - -// ConvertTo implements api.Convertible -func (t *Task) ConvertTo(ctx context.Context, sink apis.Convertible) error { - return fmt.Errorf("v1 is the highest known version, got: %T", sink) -} +import "context" -// ConvertFrom implements api.Convertible -func (t *Task) ConvertFrom(ctx context.Context, source apis.Convertible) error { - return fmt.Errorf("v1 is the highest known version, got: %T", source) +// SetDefaults set the default type for TaskResult +func (tr *TaskResult) SetDefaults(context.Context) { + if tr != nil && tr.Type == "" { + // ResultsTypeString is the default value + tr.Type = ResultsTypeString + } } diff --git a/pkg/apis/pipeline/v1/result_types.go b/pkg/apis/pipeline/v1/result_types.go new file mode 100644 index 00000000000..daf9abf2606 --- /dev/null +++ b/pkg/apis/pipeline/v1/result_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "strings" + +// TaskResult used to describe the results of a task +type TaskResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. 
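When the result's Type is left empty, the defaulting added in result_defaults.go above fills in "string". A minimal sketch of that behaviour, not part of the diff itself:

```go
package main

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func main() {
	// Type is deliberately left unset, as resources created before the
	// field existed may be.
	res := v1.TaskResult{Name: "digest", Description: "image digest"}
	res.SetDefaults(context.Background())
	fmt.Println(res.Type) // "string", i.e. ResultsTypeString
}
```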
+ // +optional + Type ResultsType `json:"type,omitempty"` + + // Properties is the JSON Schema properties to support key-value pairs results. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + + // Description is a human-readable description of the result + // +optional + Description string `json:"description,omitempty"` +} + +// TaskRunResult used to describe the results of a task +type TaskRunResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Value the given value of the result + Value ArrayOrString `json:"value"` +} + +// ResultsType indicates the type of a result; +// Used to distinguish between a single string and an array of strings. +// Note that there is ResultType used to find out whether a +// PipelineResourceResult is from a task result or not, which is different from +// this ResultsType. +// TODO(#4723): add "array" and "object" support +// TODO(#4723): align ResultsType and ParamType in ArrayOrString +type ResultsType string + +// Valid ResultsType: +const ( + ResultsTypeString ResultsType = "string" + ResultsTypeArray ResultsType = "array" + ResultsTypeObject ResultsType = "object" +) + +// AllResultsTypes can be used for ResultsTypes validation. +var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject} + +// ResultsArrayReference returns the reference of the result. e.g. results.resultname from $(results.resultname[*]) +func ResultsArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(a, "$("), ")"), "[*]") +} diff --git a/pkg/apis/pipeline/v1/result_validation.go b/pkg/apis/pipeline/v1/result_validation.go new file mode 100644 index 00000000000..de17c847182 --- /dev/null +++ b/pkg/apis/pipeline/v1/result_validation.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "regexp" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" + "knative.dev/pkg/apis" +) + +// ResultNameFormat Constant used to define the the regex Result.Name should follow +const ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` + +var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) + +// Validate implements apis.Validatable +func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { + if !resultNameFormatRegex.MatchString(tr.Name) { + return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) + } + // Array and Object are alpha features + if tr.Type == ResultsTypeArray || tr.Type == ResultsTypeObject { + return errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + } + + // Resources created before the result. Type was introduced may not have Type set + // and should be considered valid + if tr.Type == "" { + return nil + } + + // By default the result type is string + if tr.Type != ResultsTypeString { + return apis.ErrInvalidValue(tr.Type, "type", fmt.Sprintf("type must be string")) + } + + return nil +} diff --git a/pkg/apis/pipeline/v1/result_validation_test.go b/pkg/apis/pipeline/v1/result_validation_test.go new file mode 100644 index 00000000000..366143bb42f --- /dev/null +++ b/pkg/apis/pipeline/v1/result_validation_test.go @@ -0,0 +1,155 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/tektoncd/pipeline/pkg/apis/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/test/diff" + "knative.dev/pkg/apis" +) + +func TestResultsValidate(t *testing.T) { + tests := []struct { + name string + Result v1.TaskResult + apiFields string + }{{ + name: "valid result type empty", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Description: "my great result", + }, + apiFields: "stable", + }, { + name: "valid result type string", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "string", + Description: "my great result", + }, + + apiFields: "stable", + }, { + name: "valid result type array", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "array", + Description: "my great result", + }, + + apiFields: "alpha", + }, { + name: "valid result type object", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "array", + Description: "my great result", + }, + + apiFields: "alpha", + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + if tt.apiFields == "alpha" { + ctx = config.EnableAlphaAPIFields(ctx) + } + if err := tt.Result.Validate(ctx); err != nil { + t.Errorf("TaskSpec.Validate() = %v", err) + } + }) + } +} + +func TestResultsValidateError(t *testing.T) { + tests := []struct { + name string + Result v1.TaskResult + apiFields string + expectedError apis.FieldError + }{{ + name: "invalid result type in stable", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "wrong", + Description: "my great result", + }, + apiFields: "stable", + expectedError: apis.FieldError{ + Message: `invalid value: wrong`, + Paths: []string{"type"}, + Details: "type must be string", + }, + }, { + name: "invalid result type in alpha", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "wrong", + Description: "my great result", + }, + apiFields: "alpha", + expectedError: apis.FieldError{ + Message: `invalid value: 
wrong`, + Paths: []string{"type"}, + Details: "type must be string", + }, + }, { + name: "invalid array result type in stable", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "array", + Description: "my great result", + }, + apiFields: "stable", + expectedError: apis.FieldError{ + Message: "results type requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"", + }, + }, { + name: "invalid object result type in stable", + Result: v1.TaskResult{ + Name: "MY-RESULT", + Type: "object", + Description: "my great result", + }, + apiFields: "stable", + expectedError: apis.FieldError{ + Message: "results type requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + if tt.apiFields == "alpha" { + ctx = config.EnableAlphaAPIFields(ctx) + } + err := tt.Result.Validate(ctx) + if err == nil { + t.Fatalf("Expected an error, got nothing for %v", tt.Result) + } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("TaskSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } + + }) + } +} diff --git a/pkg/apis/pipeline/v1/swagger.json b/pkg/apis/pipeline/v1/swagger.json index 4e8649b2e51..a945319cde9 100644 --- a/pkg/apis/pipeline/v1/swagger.json +++ b/pkg/apis/pipeline/v1/swagger.json @@ -131,6 +131,526 @@ } } }, + "v1.ArrayOrString": { + "description": "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. consideration the object case after the community reaches an agreement on it.", + "type": "object", + "required": [ + "type", + "stringVal", + "arrayVal", + "objectVal" + ], + "properties": { + "arrayVal": { + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "objectVal": { + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "stringVal": { + "description": "Represents the stored type of ArrayOrString.", + "type": "string", + "default": "" + }, + "type": { + "type": "string", + "default": "" + } + } + }, + "v1.Param": { + "description": "Param declares an ArrayOrString to use for the parameter called name.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "value": { + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, + "v1.ParamSpec": { + "description": "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "default": { + "description": "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + "$ref": "#/definitions/v1.ArrayOrString" + }, + "description": { + "description": "Description is a user-facing description of the parameter that may be used to populate a UI.", + "type": "string" + }, + "name": { + "description": "Name declares the name by which a parameter is referenced.", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs parameter.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + "type": "string" + } + } + }, + "v1.PipelineWorkspaceDeclaration": { + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + "type": "string" + }, + "name": { + "description": "Name is the name of a workspace to be provided by a PipelineRun.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in PipelineRuns. By default this field is false and so declared workspaces are required.", + "type": "boolean" + } + } + }, + "v1.PropertySpec": { + "description": "PropertySpec defines the struct for object keys", + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, + "v1.ResolverParam": { + "description": "ResolverParam is a single parameter passed to a resolver.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name is the name of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the string value of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + } + } + }, + "v1.ResolverRef": { + "description": "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + "type": "object", + "properties": { + "resolver": { + "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + "type": "string" + }, + "resource": { + "description": "Resource contains the parameters used to identify the referenced Tekton resource. 
Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ResolverParam" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Sidecar": { + "description": "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "$ref": "#/definitions/v1.Lifecycle" + }, + "livenessProbe": { + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string", + "default": "" + }, + "ports": { + "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "startupProbe": { + "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Step": { + "description": "Step runs a subcomponent of a Task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). 
Cannot be updated.", + "type": "string", + "default": "" + }, + "onError": { + "description": "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "timeout": { + "description": "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "$ref": "#/definitions/v1.Duration" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.StepTemplate": { + "description": "StepTemplate is a template for a Step", + "type": "object", + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, "v1.Task": { "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", "type": "object", @@ -182,6 +702,60 @@ } } }, + "v1.TaskResult": { + "description": "TaskResult used to describe the results of a task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human-readable description of the result", + "type": "string" + }, + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs results.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + } + } + }, + "v1.TaskRunResult": { + "description": "TaskRunResult used to describe the results of a task", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + }, + "value": { + "description": "Value the given value of the result", + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, "v1.TaskSpec": { "description": "TaskSpec defines the desired state of Task.", "type": "object", @@ -189,6 +763,174 @@ "description": { "description": "Description is a user-facing description of the task that may be used to populate a UI.", "type": "string" + }, + "params": { + "description": "Params is a list of input parameters required to run the task. 
Params must be supplied as inputs in TaskRuns unless they declare a default value.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ParamSpec" + }, + "x-kubernetes-list-type": "atomic" + }, + "results": { + "description": "Results are values that this Task can output", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "sidecars": { + "description": "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Sidecar" + }, + "x-kubernetes-list-type": "atomic" + }, + "stepTemplate": { + "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + "$ref": "#/definitions/v1.StepTemplate" + }, + "steps": { + "description": "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Step" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumes": { + "description": "Volumes is a collection of volumes that are available to mount into the steps of the build.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Volume" + }, + "x-kubernetes-list-type": "atomic" + }, + "workspaces": { + "description": "Workspaces are the volumes that this Task requires.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceDeclaration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.WorkspaceBinding": { + "description": "WorkspaceBinding maps a Task's declared workspace to a Volume.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "configMap": { + "description": "ConfigMap represents a configMap that should populate this workspace.", + "$ref": "#/definitions/v1.ConfigMapVolumeSource" + }, + "emptyDir": { + "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + "$ref": "#/definitions/v1.EmptyDirVolumeSource" + }, + "name": { + "description": "Name is the name of the workspace populated by the volume.", + "type": "string", + "default": "" + }, + "persistentVolumeClaim": { + "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" + }, + "secret": { + "description": "Secret represents a secret that should populate this workspace.", + "$ref": "#/definitions/v1.SecretVolumeSource" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + "type": "string" + }, + "volumeClaimTemplate": { + "description": "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. 
The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + "$ref": "#/definitions/v1.PersistentVolumeClaim" + } + } + }, + "v1.WorkspaceDeclaration": { + "description": "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is an optional human readable description of this volume.", + "type": "string" + }, + "mountPath": { + "description": "MountPath overrides the directory that the volume will be made available at.", + "type": "string" + }, + "name": { + "description": "Name is the name by which you can bind the volume at runtime.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + "type": "boolean" + }, + "readOnly": { + "description": "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.", + "type": "boolean" + } + } + }, + "v1.WorkspacePipelineTaskBinding": { + "description": "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name is the name of the workspace as declared by the task", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + "type": "string" + }, + "workspace": { + "description": "Workspace is the name of the workspace declared by the pipeline", + "type": "string" + } + } + }, + "v1.WorkspaceUsage": { + "description": "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + "type": "object", + "required": [ + "name", + "mountPath" + ], + "properties": { + "mountPath": { + "description": "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + "type": "string", + "default": "" + }, + "name": { + "description": "Name is the name of the workspace this Step or Sidecar wants access to.", + "type": "string", + "default": "" } } } diff --git a/pkg/apis/pipeline/v1/task_defaults.go b/pkg/apis/pipeline/v1/task_defaults.go index ae1fc5fb66b..77a38425f23 100644 --- a/pkg/apis/pipeline/v1/task_defaults.go +++ b/pkg/apis/pipeline/v1/task_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
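To make the swagger schemas above a little more concrete, here is a rough Go sketch of a TaskSpec that combines params, results, workspaces, and an isolated per-Step workspace. The image names, workspace names, and script contents are invented, and the sketch assumes the v1 workspace types carry the same Go field names as their v1beta1 counterparts.

```go
package main

import (
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func main() {
	// The "signing-secrets" workspace is requested only by the "sign" step via
	// Step.Workspaces, which is gated behind enable-api-fields: "alpha".
	ts := v1.TaskSpec{
		Params: []v1.ParamSpec{{Name: "image", Type: v1.ParamTypeString}},
		Workspaces: []v1.WorkspaceDeclaration{
			{Name: "source"},
			{Name: "signing-secrets", ReadOnly: true},
		},
		Steps: []v1.Step{
			{Name: "build", Image: "golang:1.18", Script: "go build ./..."},
			{
				Name:   "sign",
				Image:  "example.com/signer", // illustrative image
				Script: "sign $(params.image)",
				Workspaces: []v1.WorkspaceUsage{
					{Name: "signing-secrets", MountPath: "/secrets"},
				},
			},
		},
		Results: []v1.TaskResult{{Name: "digest", Type: v1.ResultsTypeString}},
	}
	fmt.Println(len(ts.Steps), "steps,", len(ts.Workspaces), "workspaces")
}
```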
@@ -31,4 +31,10 @@ func (t *Task) SetDefaults(ctx context.Context) { // SetDefaults set any defaults for the task spec func (ts *TaskSpec) SetDefaults(ctx context.Context) { + for i := range ts.Params { + ts.Params[i].SetDefaults(ctx) + } + for i := range ts.Results { + ts.Results[i].SetDefaults(ctx) + } } diff --git a/pkg/apis/pipeline/v1/task_types.go b/pkg/apis/pipeline/v1/task_types.go index 4b2e8bb75c1..4283e8119cd 100644 --- a/pkg/apis/pipeline/v1/task_types.go +++ b/pkg/apis/pipeline/v1/task_types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ package v1 import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "knative.dev/pkg/kmeta" @@ -54,12 +55,44 @@ func (*Task) GetGroupVersionKind() schema.GroupVersionKind { // TaskSpec defines the desired state of Task. type TaskSpec struct { + // Params is a list of input parameters required to run the task. Params + // must be supplied as inputs in TaskRuns unless they declare a default + // value. + // +optional + // +listType=atomic + Params []ParamSpec `json:"params,omitempty"` + // Description is a user-facing description of the task that may be // used to populate a UI. // +optional Description string `json:"description,omitempty"` - // TODO(#4984): Add the rest of task spec + // Steps are the steps of the build; each step is run sequentially with the + // source mounted into /workspace. + // +listType=atomic + Steps []Step `json:"steps,omitempty"` + + // Volumes is a collection of volumes that are available to mount into the + // steps of the build. + // +listType=atomic + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // StepTemplate can be used as the basis for all step containers within the + // Task, so that the steps inherit settings on the base container. + StepTemplate *StepTemplate `json:"stepTemplate,omitempty"` + + // Sidecars are run alongside the Task's step containers. They begin before + // the steps start and end after the steps complete. + // +listType=atomic + Sidecars []Sidecar `json:"sidecars,omitempty"` + + // Workspaces are the volumes that this Task requires. + // +listType=atomic + Workspaces []WorkspaceDeclaration `json:"workspaces,omitempty"` + + // Results are values that this Task can output + // +listType=atomic + Results []TaskResult `json:"results,omitempty"` } // TaskList contains a list of Task diff --git a/pkg/apis/pipeline/v1/task_validation.go b/pkg/apis/pipeline/v1/task_validation.go index 5b129f7547b..75b32e2096e 100644 --- a/pkg/apis/pipeline/v1/task_validation.go +++ b/pkg/apis/pipeline/v1/task_validation.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
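Since the hunk above wires both ParamSpec and TaskResult defaulting into TaskSpec.SetDefaults, the cascade can be sketched as follows. The assumption that an untyped ParamSpec defaults to "string" mirrors the v1beta1 behaviour and is not shown in this diff; the result defaulting comes from result_defaults.go above.

```go
package main

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func main() {
	ts := v1.TaskSpec{
		Steps:   []v1.Step{{Image: "alpine", Script: "echo hello"}},
		Params:  []v1.ParamSpec{{Name: "greeting"}}, // no Type set
		Results: []v1.TaskResult{{Name: "digest"}},  // no Type set
	}
	ts.SetDefaults(context.Background())

	// The loops added in the hunk above default each entry:
	// Results[0].Type via TaskResult.SetDefaults (result_defaults.go),
	// Params[0].Type via ParamSpec.SetDefaults (assumed to default to "string").
	fmt.Println(ts.Params[0].Type, ts.Results[0].Type) // string string
}
```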
@@ -18,12 +18,37 @@ package v1 import ( "context" + "fmt" + "path/filepath" + "regexp" + "strings" + "time" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + "github.com/tektoncd/pipeline/pkg/list" + "github.com/tektoncd/pipeline/pkg/substitution" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) +const ( + // stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules. + // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) + // - Must begin with a letter or an underscore (_) + stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" + + // objectVariableNameFormat is the regext used to validate object name and key names format + // The difference with the array or string name format is that object variable names shouldn't contain dots. + objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$" +) + var _ apis.Validatable = (*Task)(nil) +var stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat) +var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { @@ -36,5 +61,530 @@ func (t *Task) Validate(ctx context.Context) *apis.FieldError { // Validate implements apis.Validatable func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + if len(ts.Steps) == 0 { + errs = errs.Also(apis.ErrMissingField("steps")) + } + + if config.IsSubstituted(ctx) { + // Validate the task's workspaces only. + errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + + return errs + } + + errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) + errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) + if err != nil { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("error merging step template and steps: %s", err), + Paths: []string{"stepTemplate"}, + Details: err.Error(), + }) + } + + errs = errs.Also(validateSteps(ctx, mergedSteps).ViaField("steps")) + errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params)) + errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps)) + errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) + return errs +} + +func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) { + for index, result := range results { + errs = errs.Also(result.Validate(ctx).ViaIndex(index)) + } + return errs +} + +// a mount path which conflicts with any other declared workspaces, with the explicitly +// declared volume mounts, or with the stepTemplate. The names must also be unique. 
+func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) { + mountPaths := sets.NewString() + for _, step := range steps { + for _, vm := range step.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + if stepTemplate != nil { + for _, vm := range stepTemplate.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + + wsNames := sets.NewString() + for idx, w := range workspaces { + // Workspace names must be unique + if wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx)) + } else { + wsNames.Insert(w.Name) + } + // Workspaces must not try to use mount paths that are already used + mountPath := filepath.Clean(w.GetMountPath()) + if _, ok := mountPaths[mountPath]; ok { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx)) + } + mountPaths[mountPath] = struct{}{} + } + return errs +} + +// validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps +// refer to workspaces that are defined in the Task. +// +// This is an alpha feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "alpha". +func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { + workspaces := ts.Workspaces + steps := ts.Steps + sidecars := ts.Sidecars + + wsNames := sets.NewString() + for _, w := range workspaces { + wsNames.Insert(w.Name) + } + + for stepIdx, step := range steps { + if len(step.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + } + for workspaceIdx, w := range step.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(stepIdx).ViaField("steps")) + } + } + } + + for sidecarIdx, sidecar := range sidecars { + if len(sidecar.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + } + for workspaceIdx, w := range sidecar.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(sidecarIdx).ViaField("sidecars")) + } + } + } + + return errs +} + +// ValidateVolumes validates a slice of volumes to make sure there are no dupilcate names +func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) { + // Task must not have duplicate volume names. + vols := sets.NewString() + for idx, v := range volumes { + if vols.Has(v.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", v.Name), "name").ViaIndex(idx)) + } else { + vols.Insert(v.Name) + } + } + return errs +} + +func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) { + // Task must not have duplicate step names. 
+ names := sets.NewString() + for idx, s := range steps { + errs = errs.Also(validateStep(ctx, s, names).ViaIndex(idx)) + } + return errs +} + +func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) { + if s.Image == "" { + errs = errs.Also(apis.ErrMissingField("Image")) + } + + if s.Script != "" { + if len(s.Command) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: "script cannot be used with command", + Paths: []string{"script"}, + }) + } + } + + if s.Name != "" { + if names.Has(s.Name) { + errs = errs.Also(apis.ErrInvalidValue(s.Name, "name")) + } + if e := validation.IsDNS1123Label(s.Name); len(e) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", s.Name), + Paths: []string{"name"}, + Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }) + } + names.Insert(s.Name) + } + + if s.Timeout != nil { + if s.Timeout.Duration < time.Duration(0) { + return apis.ErrInvalidValue(s.Timeout.Duration, "negative timeout") + } + } + + for j, vm := range s.VolumeMounts { + if strings.HasPrefix(vm.MountPath, "/tekton/") && + !strings.HasPrefix(vm.MountPath, "/tekton/home") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j)) + } + if strings.HasPrefix(vm.Name, "tekton-internal-") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j)) + } + } + + if s.OnError != "" { + if s.OnError != "continue" && s.OnError != "stopAndFail" { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value: %v", s.OnError), + Paths: []string{"onError"}, + Details: "Task step onError must be either continue or stopAndFail", + }) + } + } + + if s.Script != "" { + cleaned := strings.TrimSpace(s.Script) + if strings.HasPrefix(cleaned, "#!win") { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) + } + } + return errs +} + +// ValidateParameterTypes validates all the types within a slice of ParamSpecs +func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { + for _, p := range params { + if p.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } + errs = errs.Also(p.ValidateType()) + } + return errs +} + +// ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type +func (p ParamSpec) ValidateType() *apis.FieldError { + // Ensure param has a valid type. + validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true + } + } + if !validType { + return apis.ErrInvalidValue(p.Type, fmt.Sprintf("%s.type", p.Name)) + } + + // If a default value is provided, ensure its type matches param's declared type. 
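+	// For example, a param declared as type "array" may not default to a plain string value.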
+ if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("%s.type", p.Name), + fmt.Sprintf("%s.default.type", p.Name), + }, + } + } + + // Check object type and its PropertySpec type + return p.ValidateObjectType() +} + +// ValidateObjectType checks that object type parameter does not miss the +// definition of `properties` section and the type of a PropertySpec is allowed. +// (Currently, only string is allowed) +func (p ParamSpec) ValidateObjectType() *apis.FieldError { + if p.Type == ParamTypeObject && p.Properties == nil { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + + invalidKeys := []string{} + for key, propertySpec := range p.Properties { + if propertySpec.Type != ParamTypeString { + invalidKeys = append(invalidKeys, key) + } + } + + if len(invalidKeys) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("The value type specified for these keys %v is invalid", invalidKeys), + Paths: []string{fmt.Sprintf("%s.properties", p.Name)}, + } + } + return nil } + +// ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps +func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { + allParameterNames := sets.NewString() + stringParameterNames := sets.NewString() + arrayParameterNames := sets.NewString() + objectParamSpecs := []ParamSpec{} + var errs *apis.FieldError + for _, p := range params { + // validate no duplicate names + if allParameterNames.Has(p.Name) { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) + } + allParameterNames.Insert(p.Name) + + switch p.Type { + case ParamTypeArray: + arrayParameterNames.Insert(p.Name) + case ParamTypeObject: + objectParamSpecs = append(objectParamSpecs, p) + default: + stringParameterNames.Insert(p.Name) + } + } + + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) + errs = errs.Also(validateObjectDefault(objectParamSpecs)) + return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) +} + +func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { + taskRunContextNames := sets.NewString().Insert( + "name", + "namespace", + "uid", + ) + taskContextNames := sets.NewString().Insert( + "name", + "retry-count", + ) + errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames) + return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames)) +} + +// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object +func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) { + objectParameterNames := sets.NewString() + for _, p := range params { + // collect all names of object type params + objectParameterNames.Insert(p.Name) + + // collect all keys for this object param + objectKeys := sets.NewString() + for key := range p.Properties { + objectKeys.Insert(key) + } + + // check if the object's key names are referenced correctly i.e. 
param.objectParam.key1 + errs = errs.Also(validateVariables(ctx, steps, fmt.Sprintf("params\\.%s", p.Name), objectKeys)) + } + + return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) +} + +// validateObjectDefault validates the keys of all the object params within a +// slice of ParamSpecs are provided in default iff the default section is provided. +func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { + for _, p := range objectParams { + errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) + } + return errs +} + +// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. +func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { + if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { + return nil + } + + neededKeys := []string{} + providedKeys := []string{} + + // collect all needed keys + for key := range properties { + neededKeys = append(neededKeys, key) + } + + // collect all provided keys + for key := range propertiesProvider.ObjectVal { + providedKeys = append(providedKeys, key) + } + + missings := list.DiffLeft(neededKeys, providedKeys) + if len(missings) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), + Paths: []string{"properties", "default"}, + } + } + + return nil +} + +// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings +// i.e. param.objectParam, param.objectParam[*] +func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func 
validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + // We've checked param name format. Now, we want to check if param names are referenced correctly in each step + for idx, step := range steps { + errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +// validateNameFormat validates that the name format of all param types follows the rules +func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) { + // checking string or array name format + // ---- + invalidStringAndArrayNames := []string{} + // Converting to sorted list here rather than just looping map keys + // because we want the order of items in vars to be deterministic for purpose of unit testing + for _, name := range stringAndArrayParams.List() { + if !stringAndArrayVariableNameFormatRegex.MatchString(name) { + invalidStringAndArrayNames = append(invalidStringAndArrayNames, name) + } + } + + if len(invalidStringAndArrayNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", invalidStringAndArrayNames), + Paths: []string{"params"}, + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }) + } + + // checking object name and key name format + // ----- + invalidObjectNames := map[string][]string{} + for _, obj := range objectParams { + // check object param name + if !objectVariableNameFormatRegex.MatchString(obj.Name) { + invalidObjectNames[obj.Name] = []string{} + } + + // check key names + for k := range obj.Properties { + if !objectVariableNameFormatRegex.MatchString(k) { + invalidObjectNames[obj.Name] = append(invalidObjectNames[obj.Name], k) + } + } + } + + if len(invalidObjectNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name format is invalid: %s", invalidObjectNames), + Paths: []string{"params"}, + Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust 
begin with a letter or an underscore (_)", + }) + } + + return errs +} + +func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) + if !(config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" && prefix == "params") { + errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + } + for i, cmd := range step.Command { + errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + } + for _, env := range step.Env { + errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { + return substitution.ValidateVariableP(value, prefix, vars) +} + +func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { + return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) +} + +func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) +} + +func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) +} diff --git a/pkg/apis/pipeline/v1/task_validation_test.go b/pkg/apis/pipeline/v1/task_validation_test.go new file mode 100644 index 00000000000..b05c9287043 --- /dev/null +++ b/pkg/apis/pipeline/v1/task_validation_test.go @@ -0,0 +1,1519 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/tektoncd/pipeline/pkg/apis/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" +) + +var validSteps = []v1.Step{{ + Name: "mystep", + Image: "myimage", +}} + +var invalidSteps = []v1.Step{{ + Name: "replaceImage", + Image: "myimage", +}} + +func TestTaskValidate(t *testing.T) { + tests := []struct { + name string + t *v1.Task + wc func(context.Context) context.Context + }{{ + name: "do not validate spec on delete", + t: &v1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "task"}, + }, + wc: apis.WithinDelete, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + if tt.wc != nil { + ctx = tt.wc(ctx) + } + err := tt.t.Validate(ctx) + if err != nil { + t.Errorf("Task.Validate() returned error for valid Task: %v", err) + } + }) + } +} + +func TestTaskSpecValidate(t *testing.T) { + type fields struct { + Params []v1.ParamSpec + Steps []v1.Step + StepTemplate *v1.StepTemplate + Workspaces []v1.WorkspaceDeclaration + Results []v1.TaskResult + } + tests := []struct { + name string + fields fields + }{{ + name: "unnamed steps", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + }, { + Image: "myotherimage", + }}, + }, + }, { + name: "valid params type implied", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Description: "param", + Default: v1.NewArrayOrString("default"), + }}, + Steps: validSteps, + }, + }, { + name: "valid params type explicit", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeString, + Description: "param", + Default: v1.NewArrayOrString("default"), + }, { + Name: "myobj", + Type: v1.ParamTypeObject, + Description: "param", + Properties: map[string]v1.PropertySpec{ + "key1": {}, + "key2": {}, + }, + Default: v1.NewObject(map[string]string{ + "key1": "var1", + "key2": "var2", + }), + }, { + Name: "myobjWithoutDefault", + Type: v1.ParamTypeObject, + Description: "param", + Properties: map[string]v1.PropertySpec{ + "key1": {}, + "key2": {}, + }, + }}, + Steps: validSteps, + }, + }, { + name: "valid template variable", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + }, { + Name: "foo-is-baz", + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "url", + Args: []string{"--flag=$(params.baz) && $(params.foo-is-baz)"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + }, { + name: "valid array template variable", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "myimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"$(params.baz)", "middle string", "$(params.foo-is-baz)"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + }, { + name: "valid object template variable", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "gitrepo", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{ + "url": {}, + "commit": {}, + }, + }}, + Steps: []v1.Step{{ + Name: "do-the-clone", + Image: "some-git-image", + Args: []string{"-url=$(params.gitrepo.url)", "-commit=$(params.gitrepo.commit)"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + }, { + name: "valid star array template variable", + fields: 
fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "myimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"$(params.baz[*])", "middle string", "$(params.foo-is-baz[*])"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + }, { + name: "valid path variable for legacy credential helper (aka creds-init)", + fields: fields{ + Steps: []v1.Step{{ + Name: "mystep", + Image: "echo", + Args: []string{"$(credentials.path)"}, + }}, + }, + }, { + name: "step template included in validation", + fields: fields{ + Steps: []v1.Step{{ + Name: "astep", + Command: []string{"echo"}, + Args: []string{"hello"}, + }}, + StepTemplate: &v1.StepTemplate{ + Image: "some-image", + }, + }, + }, { + name: "valid step with script", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Script: ` + #!/usr/bin/env bash + hello world`, + }}, + }, + }, { + name: "valid step with parameterized script", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + }, { + Name: "foo-is-baz", + }}, + Steps: []v1.Step{{ + Image: "my-image", + Script: ` + #!/usr/bin/env bash + hello $(params.baz)`, + }}, + }, + }, { + name: "valid step with script and args", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello $1`, + }}, + }, + }, { + name: "valid step with volumeMount under /tekton/home", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + VolumeMounts: []corev1.VolumeMount{{ + Name: "foo", + MountPath: "/tekton/home", + }}, + }}, + }, + }, { + name: "valid workspace", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + }}, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "foo-workspace", + Description: "my great workspace", + MountPath: "some/path", + }}, + }, + }, { + name: "valid result", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + }}, + Results: []v1.TaskResult{{ + Name: "MY-RESULT", + Description: "my great result", + }}, + }, + }, { + name: "valid result type string", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + }}, + Results: []v1.TaskResult{{ + Name: "MY-RESULT", + Type: "string", + Description: "my great result", + }}, + }, + }, { + name: "valid result type array", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + }}, + Results: []v1.TaskResult{{ + Name: "MY-RESULT", + Type: v1.ResultsTypeArray, + Description: "my great result", + }}, + }, + }, { + name: "valid result type object", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + }}, + Results: []v1.TaskResult{{ + Name: "MY-RESULT", + Type: v1.ResultsTypeObject, + Description: "my great result", + }}, + }, + }, { + name: "valid task name context", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello "$(context.task.name)"`, + }}, + }, + }, { + name: "valid task retry count context", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + retry count "$(context.task.retry-count)"`, + }}, + }, + }, { + name: "valid taskrun name context", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello "$(context.taskRun.name)"`, + }}, + }, + }, { + 
name: "valid taskrun uid context", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello "$(context.taskRun.uid)"`, + }}, + }, + }, { + name: "valid context", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello "$(context.taskRun.namespace)"`, + }}, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Params: tt.fields.Params, + Steps: tt.fields.Steps, + StepTemplate: tt.fields.StepTemplate, + Workspaces: tt.fields.Workspaces, + Results: tt.fields.Results, + } + ctx := config.EnableAlphaAPIFields(context.Background()) + ts.SetDefaults(ctx) + if err := ts.Validate(ctx); err != nil { + t.Errorf("TaskSpec.Validate() = %v", err) + } + }) + } +} + +func TestTaskSpecValidateError(t *testing.T) { + type fields struct { + Params []v1.ParamSpec + Steps []v1.Step + Volumes []corev1.Volume + StepTemplate *v1.StepTemplate + Workspaces []v1.WorkspaceDeclaration + Results []v1.TaskResult + } + tests := []struct { + name string + fields fields + expectedError apis.FieldError + }{ /*{ + name: "empty spec", + expectedError: apis.FieldError{ + Message: `missing field(s)`, + Paths: []string{"steps"}, + }, + }, { + name: "no step", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "validparam", + Type: v1.ParamTypeString, + Description: "parameter", + Default: v1.NewArrayOrString("default"), + }}, + }, + expectedError: apis.FieldError{ + Message: `missing field(s)`, + Paths: []string{"steps"}, + }, + }, */{ + name: "invalid param name format", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "_validparam1", + Description: "valid param name format", + }, { + Name: "valid_param2", + Description: "valid param name format", + }, { + Name: "", + Description: "invalid param name format", + }, { + Name: "a^b", + Description: "invalid param name format", + }, { + Name: "0ab", + Description: "invalid param name format", + }, { + Name: "f oo", + Description: "invalid param name format", + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", []string{"", "0ab", "a^b", "f oo"}), + Paths: []string{"params"}, + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }, + }, { + name: "invalid object param format - object param name and key name shouldn't contain dots.", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "invalid.name1", + Description: "object param name contains dots", + Properties: map[string]v1.PropertySpec{ + "invalid.key1": {}, + "mykey2": {}, + }, + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name format is invalid: %v", map[string][]string{ + "invalid.name1": {"invalid.key1"}, + }), + Paths: []string{"params"}, + Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)", + }, + }, { + name: "duplicated param names", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "foo", + Type: v1.ParamTypeString, + Description: "parameter", + Default: v1.NewArrayOrString("value1"), + }, { + Name: "foo", + Type: v1.ParamTypeString, + Description: "parameter", + Default: v1.NewArrayOrString("value2"), + }}, + Steps: validSteps, + }, + 
expectedError: apis.FieldError{ + Message: `parameter appears more than once`, + Paths: []string{"params[foo]"}, + }, + }, { + name: "invalid param type", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "validparam", + Type: v1.ParamTypeString, + Description: "parameter", + Default: v1.NewArrayOrString("default"), + }, { + Name: "param-with-invalid-type", + Type: "invalidtype", + Description: "invalidtypedesc", + Default: v1.NewArrayOrString("default"), + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: `invalid value: invalidtype`, + Paths: []string{"params.param-with-invalid-type.type"}, + }, + }, { + name: "param mismatching default/type 1", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeArray, + Description: "param", + Default: v1.NewArrayOrString("default"), + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: `"array" type does not match default value's type: "string"`, + Paths: []string{"params.task.type", "params.task.default.type"}, + }, + }, { + name: "param mismatching default/type 2", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeString, + Description: "param", + Default: v1.NewArrayOrString("default", "array"), + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: `"string" type does not match default value's type: "array"`, + Paths: []string{"params.task.type", "params.task.default.type"}, + }, + }, { + name: "param mismatching default/type 3", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeArray, + Description: "param", + Default: v1.NewObject(map[string]string{ + "key1": "var1", + "key2": "var2", + }), + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: `"array" type does not match default value's type: "object"`, + Paths: []string{"params.task.type", "params.task.default.type"}, + }, + }, { + name: "param mismatching default/type 4", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeObject, + Description: "param", + Properties: map[string]v1.PropertySpec{"key1": {}}, + Default: v1.NewArrayOrString("var"), + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: `"object" type does not match default value's type: "string"`, + Paths: []string{"params.task.type", "params.task.default.type"}, + }, + }, { + name: "the spec of object type parameter misses the definition of properties", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeObject, + Description: "param", + }}, + Steps: validSteps, + }, + expectedError: *apis.ErrMissingField(fmt.Sprintf("params.task.properties")), + }, { + name: "PropertySpec type is set with unsupported type", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "task", + Type: v1.ParamTypeObject, + Description: "param", + Properties: map[string]v1.PropertySpec{ + "key1": {Type: "number"}, + "key2": {Type: "string"}, + }, + }}, + Steps: validSteps, + }, + expectedError: apis.FieldError{ + Message: fmt.Sprintf("The value type specified for these keys %v is invalid", []string{"key1"}), + Paths: []string{"params.task.properties"}, + }, + }, { + name: "keys defined in properties are missed in default", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "myobjectParam", + Description: "param", + Properties: map[string]v1.PropertySpec{ + "key1": {}, + "key2": {}, + }, + Default: v1.NewObject(map[string]string{ + "key1": "var1", + "key3": "var1", + }), + }}, + Steps: 
validSteps, + }, + expectedError: apis.FieldError{ + Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", []string{"key2"}), + Paths: []string{"myobjectParam.properties", "myobjectParam.default"}, + }, + }, { + name: "invalid step", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "validparam", + Type: v1.ParamTypeString, + Description: "parameter", + Default: v1.NewArrayOrString("default"), + }}, + Steps: []v1.Step{}, + }, + expectedError: apis.FieldError{ + Message: "missing field(s)", + Paths: []string{"steps"}, + }, + }, { + name: "invalid step name", + fields: fields{ + Steps: invalidSteps, + }, + expectedError: apis.FieldError{ + Message: `invalid value "replaceImage"`, + Paths: []string{"steps[0].name"}, + Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }, + }, { + name: "inexistent param variable", + fields: fields{ + Steps: []v1.Step{{ + Name: "mystep", + Image: "myimage", + Args: []string{"--flag=$(params.inexistent)"}, + }}, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "--flag=$(params.inexistent)"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "array used in unaccepted field", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "$(params.baz)", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"$(params.baz)", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.baz)"`, + Paths: []string{"steps[0].image"}, + }, + }, { + name: "array star used in unaccepted field", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "$(params.baz[*])", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"$(params.baz)", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.baz[*])"`, + Paths: []string{"steps[0].image"}, + }, + }, { + name: "array star used illegaly in script field", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{ + { + Script: "$(params.baz[*])", + Name: "mystep", + Image: "my-image", + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.baz[*])"`, + Paths: []string{"steps[0].script"}, + }, + }, { + name: "array not properly isolated", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "someimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"not isolated: $(params.baz)", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable is not properly isolated in "not isolated: $(params.baz)"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "array star not properly isolated", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Type: v1.ParamTypeArray, + }, { + Name: "foo-is-baz", + 
Type: v1.ParamTypeArray, + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "someimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"not isolated: $(params.baz[*])", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable is not properly isolated in "not isolated: $(params.baz[*])"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "inferred array not properly isolated", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Default: v1.NewArrayOrString("implied", "array", "type"), + }, { + Name: "foo-is-baz", + Default: v1.NewArrayOrString("implied", "array", "type"), + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "someimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"not isolated: $(params.baz)", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable is not properly isolated in "not isolated: $(params.baz)"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "inferred array star not properly isolated", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "baz", + Default: v1.NewArrayOrString("implied", "array", "type"), + }, { + Name: "foo-is-baz", + Default: v1.NewArrayOrString("implied", "array", "type"), + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "someimage", + Command: []string{"$(params.foo-is-baz)"}, + Args: []string{"not isolated: $(params.baz[*])", "middle string", "url"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable is not properly isolated in "not isolated: $(params.baz[*])"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "object used in a string field", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "gitrepo", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{ + "url": {}, + "commit": {}, + }, + }}, + Steps: []v1.Step{{ + Name: "do-the-clone", + Image: "$(params.gitrepo)", + Args: []string{"echo"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.gitrepo)"`, + Paths: []string{"steps[0].image"}, + }, + }, { + name: "object star used in a string field", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "gitrepo", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{ + "url": {}, + "commit": {}, + }, + }}, + Steps: []v1.Step{{ + Name: "do-the-clone", + Image: "$(params.gitrepo[*])", + Args: []string{"echo"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.gitrepo[*])"`, + Paths: []string{"steps[0].image"}, + }, + }, { + name: "object used in a field that can accept array type", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "gitrepo", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{ + "url": {}, + "commit": {}, + }, + }}, + Steps: []v1.Step{{ + Name: "do-the-clone", + Image: "myimage", + Args: []string{"$(params.gitrepo)"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.gitrepo)"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "object star used in a field that can accept array type", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "gitrepo", + Type: v1.ParamTypeObject, + Properties: map[string]v1.PropertySpec{ + "url": {}, + "commit": {}, + }, + }}, + Steps: []v1.Step{{ + Name: 
"do-the-clone", + Image: "some-git-image", + Args: []string{"$(params.gitrepo[*])"}, + WorkingDir: "/foo/bar/src/", + }}, + }, + expectedError: apis.FieldError{ + Message: `variable type invalid in "$(params.gitrepo[*])"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "Inexistent param variable in volumeMount with existing", + fields: fields{ + Params: []v1.ParamSpec{ + { + Name: "foo", + Description: "param", + Default: v1.NewArrayOrString("default"), + }, + }, + Steps: []v1.Step{{ + Name: "mystep", + Image: "myimage", + VolumeMounts: []corev1.VolumeMount{{ + Name: "$(params.inexistent)-foo", + }}, + }}, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.inexistent)-foo"`, + Paths: []string{"steps[0].volumeMount[0].name"}, + }, + }, { + name: "Inexistent param variable with existing", + fields: fields{ + Params: []v1.ParamSpec{{ + Name: "foo", + Description: "param", + Default: v1.NewArrayOrString("default"), + }}, + Steps: []v1.Step{{ + Name: "mystep", + Image: "myimage", + Args: []string{"$(params.foo) && $(params.inexistent)"}, + }}, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "$(params.foo) && $(params.inexistent)"`, + Paths: []string{"steps[0].args[0]"}, + }, + }, { + name: "Multiple volumes with same name", + fields: fields{ + Steps: validSteps, + Volumes: []corev1.Volume{{ + Name: "workspace", + }, { + Name: "workspace", + }}, + }, + expectedError: apis.FieldError{ + Message: `multiple volumes with same name "workspace"`, + Paths: []string{"volumes[1].name"}, + }, + }, { + name: "step with script and command", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + Command: []string{"command"}, + Script: "script", + }}, + }, + expectedError: apis.FieldError{ + Message: "script cannot be used with command", + Paths: []string{"steps[0].script"}, + }, + }, { + name: "step volume mounts under /tekton/", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + VolumeMounts: []corev1.VolumeMount{{ + Name: "foo", + MountPath: "/tekton/foo", + }}, + }}, + }, + expectedError: apis.FieldError{ + Message: `volumeMount cannot be mounted under /tekton/ (volumeMount "foo" mounted at "/tekton/foo")`, + Paths: []string{"steps[0].volumeMounts[0].mountPath"}, + }, + }, { + name: "step volume mount name starts with tekton-internal-", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + VolumeMounts: []corev1.VolumeMount{{ + Name: "tekton-internal-foo", + MountPath: "/this/is/fine", + }}, + }}, + }, + expectedError: apis.FieldError{ + Message: `volumeMount name "tekton-internal-foo" cannot start with "tekton-internal-"`, + Paths: []string{"steps[0].volumeMounts[0].name"}, + }, + }, { + name: "declared workspaces names are not unique", + fields: fields{ + Steps: validSteps, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "same-workspace", + MountPath: "/foo", + }, { + Name: "same-workspace", + MountPath: "/bar", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace name \"same-workspace\" must be unique", + Paths: []string{"workspaces[1].name"}, + }, + }, { + name: "declared workspaces clash with each other", + fields: fields{ + Steps: validSteps, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "some-workspace", + MountPath: "/foo", + }, { + Name: "another-workspace", + MountPath: "/foo", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace mount path \"/foo\" must be unique", + Paths: []string{"workspaces[1].mountpath"}, + }, + }, { + name: "workspace mount path already in 
volumeMounts", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + Command: []string{"command"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "my-mount", + MountPath: "/foo", + }}, + }}, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "some-workspace", + MountPath: "/foo", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace mount path \"/foo\" must be unique", + Paths: []string{"workspaces[0].mountpath"}, + }, + }, { + name: "workspace default mount path already in volumeMounts", + fields: fields{ + Steps: []v1.Step{{ + Image: "myimage", + Command: []string{"command"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "my-mount", + MountPath: "/workspace/some-workspace/", + }}, + }}, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "some-workspace", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace mount path \"/workspace/some-workspace\" must be unique", + Paths: []string{"workspaces[0].mountpath"}, + }, + }, { + name: "workspace mount path already in stepTemplate", + fields: fields{ + StepTemplate: &v1.StepTemplate{ + VolumeMounts: []corev1.VolumeMount{{ + Name: "my-mount", + MountPath: "/foo", + }}, + }, + Steps: validSteps, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "some-workspace", + MountPath: "/foo", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace mount path \"/foo\" must be unique", + Paths: []string{"workspaces[0].mountpath"}, + }, + }, { + name: "workspace default mount path already in stepTemplate", + fields: fields{ + StepTemplate: &v1.StepTemplate{ + VolumeMounts: []corev1.VolumeMount{{ + Name: "my-mount", + MountPath: "/workspace/some-workspace", + }}, + }, + Steps: validSteps, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "some-workspace", + }}, + }, + expectedError: apis.FieldError{ + Message: "workspace mount path \"/workspace/some-workspace\" must be unique", + Paths: []string{"workspaces[0].mountpath"}, + }, + }, { + name: "result name not validate", + fields: fields{ + Steps: validSteps, + Results: []v1.TaskResult{{ + Name: "MY^RESULT", + Description: "my great result", + }}, + }, + expectedError: apis.FieldError{ + Message: `invalid key name "MY^RESULT"`, + Paths: []string{"results[0].name"}, + Details: "Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my-name', or 'my_name', regex used for validation is '^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$')", + }, + }, { + name: "result type not validate", + fields: fields{ + Steps: validSteps, + Results: []v1.TaskResult{{ + Name: "MY-RESULT", + Type: "wrong", + Description: "my great result", + }}, + }, + expectedError: apis.FieldError{ + Message: `invalid value: wrong`, + Paths: []string{"results[0].type"}, + Details: "type must be string", + }, + }, { + name: "context not validate", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Script: ` + #!/usr/bin/env bash + hello "$(context.task.missing)"`, + }}, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "\n\t\t\t\t#!/usr/bin/env bash\n\t\t\t\thello \"$(context.task.missing)\""`, + Paths: []string{"steps[0].script"}, + }, + }, { + name: "negative timeout string", + fields: fields{ + Steps: []v1.Step{{ + Timeout: &metav1.Duration{Duration: -10 * time.Second}, + }}, + }, + expectedError: apis.FieldError{ + Message: "invalid value: -10s", + Paths: []string{"steps[0].negative timeout"}, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Params: tt.fields.Params, + Steps: tt.fields.Steps, + Volumes: tt.fields.Volumes, + StepTemplate: tt.fields.StepTemplate, + Workspaces: tt.fields.Workspaces, + Results: tt.fields.Results, + } + ctx := config.EnableAlphaAPIFields(context.Background()) + ts.SetDefaults(ctx) + err := ts.Validate(ctx) + if err == nil { + t.Fatalf("Expected an error, got nothing for %v", ts) + } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("TaskSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } + }) + } +} + +func TestStepAndSidecarWorkspaces(t *testing.T) { + type fields struct { + Steps []v1.Step + Sidecars []v1.Sidecar + Workspaces []v1.WorkspaceDeclaration + } + tests := []struct { + name string + fields fields + }{{ + name: "valid step workspace usage", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"arg"}, + Workspaces: []v1.WorkspaceUsage{{ + Name: "foo-workspace", + MountPath: "/a/custom/mountpath", + }}, + }}, + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "foo-workspace", + Description: "my great workspace", + MountPath: "some/path", + }}, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Steps: tt.fields.Steps, + Sidecars: tt.fields.Sidecars, + Workspaces: tt.fields.Workspaces, + } + ctx := config.EnableAlphaAPIFields(context.Background()) + ts.SetDefaults(ctx) + if err := ts.Validate(ctx); err != nil { + t.Errorf("TaskSpec.Validate() = %v", err) + } + }) + } +} + +func TestStepAndSidecarWorkspacesErrors(t *testing.T) { + type fields struct { + Steps []v1.Step + Sidecars []v1.Sidecar + } + tests := []struct { + name string + fields fields + expectedError apis.FieldError + }{{ + name: "step workspace that refers to non-existent workspace declaration fails", + fields: fields{ + Steps: []v1.Step{{ + Image: "foo", + Workspaces: []v1.WorkspaceUsage{{ + Name: "foo", + }}, + }}, + }, + expectedError: apis.FieldError{ + Message: `undefined workspace "foo"`, + Paths: []string{"steps[0].workspaces[0].name"}, + }, + }, { + name: "sidecar workspace that refers to non-existent workspace declaration fails", + fields: fields{ + Steps: []v1.Step{{ + Image: "foo", + }}, + Sidecars: []v1.Sidecar{{ + Image: "foo", + Workspaces: []v1.WorkspaceUsage{{ + 
Name: "foo", + }}, + }}, + }, + expectedError: apis.FieldError{ + Message: `undefined workspace "foo"`, + Paths: []string{"sidecars[0].workspaces[0].name"}, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Steps: tt.fields.Steps, + Sidecars: tt.fields.Sidecars, + } + + ctx := config.EnableAlphaAPIFields(context.Background()) + ts.SetDefaults(ctx) + err := ts.Validate(ctx) + if err == nil { + t.Fatalf("Expected an error, got nothing for %v", ts) + } + + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("TaskSpec.Validate() errors diff %s", diff.PrintWantGot(d)) + } + }) + } +} + +func TestStepOnError(t *testing.T) { + tests := []struct { + name string + steps []v1.Step + expectedError *apis.FieldError + }{{ + name: "valid step - valid onError usage - set to continue - alpha API", + steps: []v1.Step{{ + OnError: "continue", + Image: "image", + Args: []string{"arg"}, + }}, + }, { + name: "valid step - valid onError usage - set to stopAndFail - alpha API", + steps: []v1.Step{{ + OnError: "stopAndFail", + Image: "image", + Args: []string{"arg"}, + }}, + }, { + name: "invalid step - onError set to invalid value - alpha API", + steps: []v1.Step{{ + OnError: "onError", + Image: "image", + Args: []string{"arg"}, + }}, + expectedError: &apis.FieldError{ + Message: fmt.Sprintf("invalid value: onError"), + Paths: []string{"onError"}, + Details: "Task step onError must be either continue or stopAndFail", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Steps: tt.steps, + } + ctx := context.Background() + ts.SetDefaults(ctx) + err := ts.Validate(ctx) + if tt.expectedError == nil && err != nil { + t.Errorf("TaskSpec.Validate() = %v", err) + } else if tt.expectedError != nil && err == nil { + t.Errorf("TaskSpec.Validate() = %v", err) + } + }) + } + +} + +// TestIncompatibleAPIVersions exercises validation of fields that +// require a specific feature gate version in order to work. 
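+// Every case is run under both the "alpha" and "stable" feature gates, and
+// validation is expected to fail only when the gate does not satisfy the
+// version the field requires.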
+func TestIncompatibleAPIVersions(t *testing.T) { + tests := []struct { + name string + requiredVersion string + spec v1.TaskSpec + }{{ + name: "step workspace requires alpha", + requiredVersion: "alpha", + spec: v1.TaskSpec{ + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "foo", + }}, + Steps: []v1.Step{{ + Image: "foo", + Workspaces: []v1.WorkspaceUsage{{ + Name: "foo", + }}, + }}, + }, + }, { + name: "sidecar workspace requires alpha", + requiredVersion: "alpha", + spec: v1.TaskSpec{ + Workspaces: []v1.WorkspaceDeclaration{{ + Name: "foo", + }}, + Steps: []v1.Step{{ + Image: "foo", + }}, + Sidecars: []v1.Sidecar{{ + Image: "foo", + Workspaces: []v1.WorkspaceUsage{{ + Name: "foo", + }}, + }}, + }, + }, { + name: "windows script support requires alpha", + requiredVersion: "alpha", + spec: v1.TaskSpec{ + Steps: []v1.Step{{ + Image: "my-image", + Script: ` + #!win powershell -File + script-1`, + }}, + }, + }} + versions := []string{"alpha", "stable"} + for _, tt := range tests { + for _, version := range versions { + testName := fmt.Sprintf("(using %s) %s", version, tt.name) + t.Run(testName, func(t *testing.T) { + ts := tt.spec + ctx := context.Background() + if version == "alpha" { + ctx = config.EnableAlphaAPIFields(ctx) + } + + ts.SetDefaults(ctx) + err := ts.Validate(ctx) + + if tt.requiredVersion != version && err == nil { + t.Fatalf("no error received even though version required is %q while feature gate is %q", tt.requiredVersion, version) + } + + if tt.requiredVersion == version && err != nil { + t.Fatalf("error received despite required version and feature gate matching %q: %v", version, err) + } + }) + } + } +} + +func TestSubstitutedContext(t *testing.T) { + type fields struct { + Params []v1.ParamSpec + Steps []v1.Step + SubstitutionContext bool + } + tests := []struct { + name string + fields fields + expectedError apis.FieldError + }{{ + name: "variable not substituted", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"params"}, + Script: ` + #!/usr/bin/env bash + hello "$(params.a)"`, + }}, + SubstitutionContext: false, + }, + expectedError: apis.FieldError{ + Message: `non-existent variable in "\n\t\t\t\t#!/usr/bin/env bash\n\t\t\t\thello \"$(params.a)\""`, + Paths: []string{"steps[0].script"}, + }, + }, { + name: "variable substituted double quoted", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"params"}, + Script: ` + #!/usr/bin/env bash + hello "$(params.a)"`, + }}, + SubstitutionContext: true, + }, + }, { + name: "variable substituted not quoted", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"params"}, + Script: ` + #!/usr/bin/env bash + hello $(params.a)`, + }}, + SubstitutionContext: true, + }, + }, { + name: "variable substituted single quoted", + fields: fields{ + Steps: []v1.Step{{ + Image: "my-image", + Args: []string{"params"}, + Script: "echo `$(params.a)`", + }}, + SubstitutionContext: true, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := &v1.TaskSpec{ + Params: tt.fields.Params, + Steps: tt.fields.Steps, + } + ctx := context.Background() + ts.SetDefaults(ctx) + if tt.fields.SubstitutionContext { + ctx = config.WithinSubstituted(ctx) + } + err := ts.Validate(ctx) + if err == nil && tt.expectedError.Error() != "" { + t.Fatalf("Expected an error, got nothing for %v", ts) + } + if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" { + t.Errorf("TaskSpec.Validate() errors diff 
%s", diff.PrintWantGot(d)) + } + }) + } +} diff --git a/pkg/apis/pipeline/v1/workspace_types.go b/pkg/apis/pipeline/v1/workspace_types.go new file mode 100644 index 00000000000..da896606245 --- /dev/null +++ b/pkg/apis/pipeline/v1/workspace_types.go @@ -0,0 +1,124 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "path/filepath" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" +) + +// WorkspaceDeclaration is a declaration of a volume that a Task requires. +type WorkspaceDeclaration struct { + // Name is the name by which you can bind the volume at runtime. + Name string `json:"name"` + // Description is an optional human readable description of this volume. + // +optional + Description string `json:"description,omitempty"` + // MountPath overrides the directory that the volume will be made available at. + // +optional + MountPath string `json:"mountPath,omitempty"` + // ReadOnly dictates whether a mounted volume is writable. By default this + // field is false and so mounted volumes are writable. + ReadOnly bool `json:"readOnly,omitempty"` + // Optional marks a Workspace as not being required in TaskRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// GetMountPath returns the mountPath for w which is the MountPath if provided or the +// default if not. +func (w *WorkspaceDeclaration) GetMountPath() string { + if w.MountPath != "" { + return w.MountPath + } + return filepath.Join(pipeline.WorkspaceDir, w.Name) +} + +// WorkspaceBinding maps a Task's declared workspace to a Volume. +type WorkspaceBinding struct { + // Name is the name of the workspace populated by the volume. + Name string `json:"name"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). + // +optional + SubPath string `json:"subPath,omitempty"` + // VolumeClaimTemplate is a template for a claim that will be created in the same namespace. + // The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun. + // +optional + VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used. + // +optional + PersistentVolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // EmptyDir represents a temporary directory that shares a Task's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // Either this OR PersistentVolumeClaim can be used. + // +optional + EmptyDir *corev1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // ConfigMap represents a configMap that should populate this workspace. 
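+	// As with the other volume sources on this binding, only a single source may be set.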
+ // +optional + ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"` + // Secret represents a secret that should populate this workspace. + // +optional + Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` +} + +// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +// Deprecated: use PipelineWorkspaceDeclaration type instead +type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration + +// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +type PipelineWorkspaceDeclaration struct { + // Name is the name of a workspace to be provided by a PipelineRun. + Name string `json:"name"` + // Description is a human readable string describing how the workspace will be + // used in the Pipeline. It can be useful to include a bit of detail about which + // tasks are intended to have access to the data on the workspace. + // +optional + Description string `json:"description,omitempty"` + // Optional marks a Workspace as not being required in PipelineRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be +// mapped to a task's declared workspace. +type WorkspacePipelineTaskBinding struct { + // Name is the name of the workspace as declared by the task + Name string `json:"name"` + // Workspace is the name of the workspace declared by the pipeline + // +optional + Workspace string `json:"workspace,omitempty"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). + // +optional + SubPath string `json:"subPath,omitempty"` +} + +// WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access +// to a Workspace defined in a Task. +type WorkspaceUsage struct { + // Name is the name of the workspace this Step or Sidecar wants access to. + Name string `json:"name"` + // MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, + // overriding any MountPath specified in the Task's WorkspaceDeclaration. + MountPath string `json:"mountPath"` +} diff --git a/pkg/apis/pipeline/v1/workspace_validation.go b/pkg/apis/pipeline/v1/workspace_validation.go new file mode 100644 index 00000000000..be852bb46d2 --- /dev/null +++ b/pkg/apis/pipeline/v1/workspace_validation.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" +) + +// allVolumeSourceFields is a list of all the volume source field paths that a +// WorkspaceBinding may include. 
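+// It is only used to name the offending fields in the error returned when a
+// binding sets zero, or more than one, volume source.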
+var allVolumeSourceFields = []string{ + "persistentvolumeclaim", + "volumeclaimtemplate", + "emptydir", + "configmap", + "secret", +} + +// Validate looks at the Volume provided in b and makes sure that it is valid. +// This means that exactly one VolumeSource must be specified, and also that the +// supported VolumeSource is itself valid. +func (b *WorkspaceBinding) Validate(context.Context) *apis.FieldError { + if b == nil || equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) { + return apis.ErrMissingField(apis.CurrentField) + } + + numSources := b.numSources() + + if numSources > 1 { + return apis.ErrMultipleOneOf(allVolumeSourceFields...) + } + + if numSources == 0 { + return apis.ErrMissingOneOf(allVolumeSourceFields...) + } + + // For a PersistentVolumeClaim to work, you must at least provide the name of the PVC to use. + if b.PersistentVolumeClaim != nil && b.PersistentVolumeClaim.ClaimName == "" { + return apis.ErrMissingField("persistentvolumeclaim.claimname") + } + + // For a ConfigMap to work, you must provide the name of the ConfigMap to use. + if b.ConfigMap != nil && b.ConfigMap.LocalObjectReference.Name == "" { + return apis.ErrMissingField("configmap.name") + } + + // For a Secret to work, you must provide the name of the Secret to use. + if b.Secret != nil && b.Secret.SecretName == "" { + return apis.ErrMissingField("secret.secretName") + } + + return nil +} + +// numSources returns the total number of volume sources that this WorkspaceBinding +// has been configured with. +func (b *WorkspaceBinding) numSources() int { + n := 0 + if b.VolumeClaimTemplate != nil { + n++ + } + if b.PersistentVolumeClaim != nil { + n++ + } + if b.EmptyDir != nil { + n++ + } + if b.ConfigMap != nil { + n++ + } + if b.Secret != nil { + n++ + } + return n +} diff --git a/pkg/apis/pipeline/v1/workspace_validation_test.go b/pkg/apis/pipeline/v1/workspace_validation_test.go new file mode 100644 index 00000000000..160db05eda8 --- /dev/null +++ b/pkg/apis/pipeline/v1/workspace_validation_test.go @@ -0,0 +1,139 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1_test + +import ( + "context" + "testing" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestWorkspaceBindingValidateValid(t *testing.T) { + for _, tc := range []struct { + name string + binding *v1.WorkspaceBinding + }{{ + name: "Valid PVC", + binding: &v1.WorkspaceBinding{ + Name: "beth", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pool-party", + }, + }, + }, { + name: "Valid volumeClaimTemplate", + binding: &v1.WorkspaceBinding{ + Name: "beth", + VolumeClaimTemplate: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypvc", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "storage": resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + }, { + name: "Valid emptyDir", + binding: &v1.WorkspaceBinding{ + Name: "beth", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, { + name: "Valid configMap", + binding: &v1.WorkspaceBinding{ + Name: "beth", + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "a-configmap-name", + }, + }, + }, + }, { + name: "Valid secret", + binding: &v1.WorkspaceBinding{ + Name: "beth", + Secret: &corev1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + if err := tc.binding.Validate(context.Background()); err != nil { + t.Errorf("didn't expect error for valid binding but got: %v", err) + } + }) + } + +} + +func TestWorkspaceBindingValidateInvalid(t *testing.T) { + for _, tc := range []struct { + name string + binding *v1.WorkspaceBinding + }{{ + name: "no binding provided", + binding: nil, + }, { + name: "Provided both pvc and emptydir", + binding: &v1.WorkspaceBinding{ + Name: "beth", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pool-party", + }, + }, + }, { + name: "Provided neither pvc nor emptydir", + binding: &v1.WorkspaceBinding{ + Name: "beth", + }, + }, { + name: "Provided pvc without claim name", + binding: &v1.WorkspaceBinding{ + Name: "beth", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{}, + }, + }, { + name: "Provided configmap without a name", + binding: &v1.WorkspaceBinding{ + Name: "beth", + ConfigMap: &corev1.ConfigMapVolumeSource{}, + }, + }, { + name: "Provided secret without a secretName", + binding: &v1.WorkspaceBinding{ + Name: "beth", + Secret: &corev1.SecretVolumeSource{}, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + if err := tc.binding.Validate(context.Background()); err == nil { + t.Errorf("expected error for invalid binding but didn't get any!") + } + }) + } +} diff --git a/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1/zz_generated.deepcopy.go index 4259b0740fc..55bd52f8892 100644 --- a/pkg/apis/pipeline/v1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1/zz_generated.deepcopy.go @@ -22,15 +22,373 @@ limitations under the License. package v1 import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ArrayOrString) DeepCopyInto(out *ArrayOrString) { + *out = *in + if in.ArrayVal != nil { + in, out := &in.ArrayVal, &out.ArrayVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectVal != nil { + in, out := &in.ObjectVal, &out.ObjectVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayOrString. +func (in *ArrayOrString) DeepCopy() *ArrayOrString { + if in == nil { + return nil + } + out := new(ArrayOrString) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Param) DeepCopyInto(out *Param) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Param. +func (in *Param) DeepCopy() *Param { + if in == nil { + return nil + } + out := new(Param) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamSpec) DeepCopyInto(out *ParamSpec) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(ArrayOrString) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpec. +func (in *ParamSpec) DeepCopy() *ParamSpec { + if in == nil { + return nil + } + out := new(ParamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration. +func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration { + if in == nil { + return nil + } + out := new(PipelineWorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. +func (in *PropertySpec) DeepCopy() *PropertySpec { + if in == nil { + return nil + } + out := new(PropertySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParam. +func (in *ResolverParam) DeepCopy() *ResolverParam { + if in == nil { + return nil + } + out := new(ResolverParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { + *out = *in + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = make([]ResolverParam, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRef. +func (in *ResolverRef) DeepCopy() *ResolverRef { + if in == nil { + return nil + } + out := new(ResolverRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(corev1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Step) DeepCopyInto(out *Step) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate. +func (in *StepTemplate) DeepCopy() *StepTemplate { + if in == nil { + return nil + } + out := new(StepTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Task) DeepCopyInto(out *Task) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) return } @@ -85,9 +443,94 @@ func (in *TaskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskResult) DeepCopyInto(out *TaskResult) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResult. +func (in *TaskResult) DeepCopy() *TaskResult { + if in == nil { + return nil + } + out := new(TaskResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResult. +func (in *TaskRunResult) DeepCopy() *TaskRunResult { + if in == nil { + return nil + } + out := new(TaskRunResult) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ParamSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StepTemplate != nil { + in, out := &in.StepTemplate, &out.StepTemplate + *out = new(StepTemplate) + (*in).DeepCopyInto(*out) + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]Sidecar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceDeclaration, len(*in)) + copy(*out, *in) + } + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]TaskResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -100,3 +543,92 @@ func (in *TaskSpec) DeepCopy() *TaskSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { + *out = *in + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(corev1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceBinding. +func (in *WorkspaceBinding) DeepCopy() *WorkspaceBinding { + if in == nil { + return nil + } + out := new(WorkspaceBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceDeclaration) DeepCopyInto(out *WorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceDeclaration. +func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration { + if in == nil { + return nil + } + out := new(WorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding. +func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding { + if in == nil { + return nil + } + out := new(WorkspacePipelineTaskBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceUsage) DeepCopyInto(out *WorkspaceUsage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceUsage. +func (in *WorkspaceUsage) DeepCopy() *WorkspaceUsage { + if in == nil { + return nil + } + out := new(WorkspaceUsage) + in.DeepCopyInto(out) + return out +}
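
For context, a minimal sketch of how the new v1 workspace types introduced by this change are exercised. It is not part of the diff: the main package is purely illustrative, it assumes pipeline.WorkspaceDir resolves to /workspace (as for the existing v1beta1 API), and the commented output is indicative rather than exact.

package main

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A WorkspaceDeclaration without an explicit MountPath is mounted at
	// <WorkspaceDir>/<name>; an explicit MountPath overrides that default.
	decl := v1.WorkspaceDeclaration{Name: "source"}
	fmt.Println(decl.GetMountPath()) // "/workspace/source" under the assumed default WorkspaceDir

	decl.MountPath = "/custom"
	fmt.Println(decl.GetMountPath()) // "/custom"

	// A WorkspaceBinding must name exactly one volume source.
	ok := &v1.WorkspaceBinding{
		Name:     "source",
		EmptyDir: &corev1.EmptyDirVolumeSource{},
	}
	if err := ok.Validate(context.Background()); err == nil {
		fmt.Println("emptyDir binding is valid")
	}

	// Supplying two sources (or none) is rejected with a FieldError that
	// lists the mutually exclusive volume source fields.
	bad := &v1.WorkspaceBinding{
		Name:     "source",
		EmptyDir: &corev1.EmptyDirVolumeSource{},
		PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
			ClaimName: "my-claim",
		},
	}
	if err := bad.Validate(context.Background()); err != nil {
		fmt.Println(err)
	}
}

The one-of validation mirrors the existing v1beta1 WorkspaceBinding behaviour, so callers migrating to v1 should not need to change how they populate workspace bindings.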