diff --git a/docs/pipelines.md b/docs/pipelines.md
index 68aca50dcb6..44c4c588c9f 100644
--- a/docs/pipelines.md
+++ b/docs/pipelines.md
@@ -10,6 +10,7 @@ This document defines `Pipelines` and their capabilities.
 - [Pipeline Tasks](#pipeline-tasks)
   - [From](#from)
   - [RunAfter](#runafter)
+  - [Retries](#retries)
 - [Ordering](#ordering)
 - [Examples](#examples)
@@ -41,6 +42,7 @@ following fields:
 - [`runAfter`](#runAfter) - Used when the [Pipeline Task](#pipeline-task)
   should be executed after another Pipeline Task, but there is no
   [output linking](#from) required
+- [`retries`](#retries) - Used when the task should be executed again if it fails, for example because of a network error or a missing dependency. Retries do not apply to cancellations.
 
 [kubernetes-overview]:
   https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields
@@ -255,6 +257,24 @@ is no output from `test-app`, so `build-app` uses `runAfter` to indicate that
 `test-app` should run before it, regardless of the order they appear in the
 spec.
 
+#### retries
+
+Sometimes you need a policy for retrying tasks that fail for reasons such as network errors, missing dependencies or upload problems.
+Any such failure must be reflected as False (corev1.ConditionFalse) in the Succeeded Condition of the TaskRun Status.
+For that reason there is an optional attribute called `retries`, which declares how many times the task should be retried in case of failure.
+
+By default, and when the attribute is absent, its value is 0 and the task is not retried.
+
+```yaml
+tasks:
+  - name: build-the-image
+    retries: 1
+    taskRef:
+      name: build-push
+```
+
+In this example, the task "build-the-image" will be executed and, if the first run fails, a second one will be triggered. If that one also fails, no further runs are triggered: a maximum of two executions.
+
 ## Ordering
 
 The [Pipeline Tasks](#pipeline-tasks) in a `Pipeline` can be connected and run
diff --git a/pkg/apis/pipeline/v1alpha1/pipeline_types.go b/pkg/apis/pipeline/v1alpha1/pipeline_types.go
index 1962ce0080d..1a38d3cfa23 100644
--- a/pkg/apis/pipeline/v1alpha1/pipeline_types.go
+++ b/pkg/apis/pipeline/v1alpha1/pipeline_types.go
@@ -73,6 +73,10 @@ type PipelineTask struct {
 	Name string `json:"name,omitempty"`
 	TaskRef TaskRef `json:"taskRef"`
 
+	// Retries represents how many times this task should be retried in case of a task failure, i.e. its ConditionSucceeded is set to False.
+	// +optional
+	Retries int `json:"retries,omitempty"`
+
 	// RunAfter is the list of PipelineTask names that should be executed before
 	// this Task executes. (Used to force a specific ordering in graph execution.)
 	// +optional
diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/pkg/apis/pipeline/v1alpha1/taskrun_types.go
index b6a85ff5043..5b96cf14a9d 100644
--- a/pkg/apis/pipeline/v1alpha1/taskrun_types.go
+++ b/pkg/apis/pipeline/v1alpha1/taskrun_types.go
@@ -138,6 +138,10 @@ type TaskRunStatus struct {
 	// Steps describes the state of each build step container.
 	// +optional
 	Steps []StepState `json:"steps,omitempty"`
+	// RetriesStatus contains the history of TaskRunStatus in case of a retry, in order to keep a record of failures.
+	// Each TaskRunStatus stored in RetriesStatus has its own nested RetriesStatus left empty, as that would be redundant.
+	// +optional
+	RetriesStatus []TaskRunStatus `json:"retriesStatus,omitempty"`
 }
 
 // GetCondition returns the Condition matching the given type.
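The two API additions above work together: `PipelineTask.Retries` declares the retry budget for a task, and `TaskRunStatus.RetriesStatus` accumulates one entry per failed attempt. The sketch below shows how a consumer of these types could decide whether a failed `TaskRun` still has retry budget left; the helper name, the example values, and the import paths are assumptions for illustration and are not part of this change.

```go
package main

import (
	"fmt"

	"github.com/knative/pkg/apis"
	duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// mayRetry is a hypothetical helper: a TaskRun whose Succeeded condition is
// False may be retried as long as fewer failed attempts have been recorded in
// RetriesStatus than the PipelineTask's Retries budget allows.
func mayRetry(pt v1alpha1.PipelineTask, tr *v1alpha1.TaskRun) bool {
	cond := tr.Status.GetCondition(apis.ConditionSucceeded)
	if cond == nil || cond.Status != corev1.ConditionFalse {
		return false // only failed TaskRuns are candidates for a retry
	}
	return len(tr.Status.RetriesStatus) < pt.Retries
}

func main() {
	pt := v1alpha1.PipelineTask{Name: "build-the-image", Retries: 1}
	tr := &v1alpha1.TaskRun{
		Status: v1alpha1.TaskRunStatus{
			Status: duckv1beta1.Status{
				Conditions: []apis.Condition{{
					Type:   apis.ConditionSucceeded,
					Status: corev1.ConditionFalse, // first attempt failed
				}},
			},
		},
	}
	fmt.Println(mayRetry(pt, tr)) // true: no retry has been recorded yet
}
```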
diff --git a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 797732c4acf..d3ef09cdc40 100644 --- a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -1430,6 +1430,13 @@ func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.RetriesStatus != nil { + in, out := &in.RetriesStatus, &out.RetriesStatus + *out = make([]TaskRunStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go index 4eb3c92a4c6..695badf8b5e 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go @@ -144,6 +144,9 @@ func NewController( // converge the two. It then updates the Status block of the Pipeline Run // resource with the current status of the resource. func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + + c.Logger.Infof("Reconciling %v", time.Now()) + // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { @@ -275,6 +278,9 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er func(name string) (v1alpha1.TaskInterface, error) { return c.taskLister.Tasks(pr.Namespace).Get(name) }, + func(name string) (*v1alpha1.TaskRun, error) { + return c.taskRunLister.TaskRuns(pr.Namespace).Get(name) + }, func(name string) (v1alpha1.TaskInterface, error) { return c.clusterTaskLister.Get(name) }, @@ -311,6 +317,13 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er } return nil } + + if pipelineState.IsDone() && pr.IsDone() { + c.timeoutHandler.Release(pr) + c.Recorder.Event(pr, corev1.EventTypeNormal, eventReasonSucceeded, "PipelineRun completed successfully.") + return nil + } + if err := resources.ValidateFrom(pipelineState); err != nil { // This Run has failed, so we need to mark it as failed and stop reconciling it pr.Status.SetCondition(&apis.Condition{ @@ -338,11 +351,6 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er } } - err = resources.ResolveTaskRuns(c.taskRunLister.TaskRuns(pr.Namespace).Get, pipelineState) - if err != nil { - return fmt.Errorf("Error getting TaskRuns for Pipeline %s: %s", p.Name, err) - } - // If the pipelinerun is cancelled, cancel tasks and update status if pr.IsCancelled() { return cancelPipelineRun(pr, pipelineState, c.PipelineClientSet) @@ -438,7 +446,19 @@ func (c *Reconciler) createTaskRun(logger *zap.SugaredLogger, rprt *resources.Re } labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name - tr := &v1alpha1.TaskRun{ + tr, _ := c.taskRunLister.TaskRuns(pr.Namespace).Get(rprt.TaskRunName) + + if tr != nil { + //is a retry + addRetryHistory(tr) + clearStatus(tr) + tr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + }) + return c.PipelineClientSet.TektonV1alpha1().TaskRuns(pr.Namespace).UpdateStatus(tr) + } + tr = &v1alpha1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: rprt.TaskRunName, Namespace: pr.Namespace, @@ -464,6 +484,19 @@ func (c *Reconciler) createTaskRun(logger *zap.SugaredLogger, rprt *resources.Re return c.PipelineClientSet.TektonV1alpha1().TaskRuns(pr.Namespace).Create(tr) } +func addRetryHistory(tr *v1alpha1.TaskRun) { + 
newStatus := *tr.Status.DeepCopy() + newStatus.RetriesStatus = nil + tr.Status.RetriesStatus = append(tr.Status.RetriesStatus, newStatus) +} + +func clearStatus(tr *v1alpha1.TaskRun) { + tr.Status.StartTime = nil + tr.Status.CompletionTime = nil + tr.Status.Results = nil + tr.Status.PodName = "" +} + func (c *Reconciler) updateStatus(pr *v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) { newPr, err := c.pipelineRunLister.PipelineRuns(pr.Namespace).Get(pr.Name) if err != nil { diff --git a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go index cbf7b71d395..356c5038a9c 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go @@ -15,6 +15,7 @@ package pipelinerun import ( "context" + "fmt" "strings" "testing" "time" @@ -512,6 +513,15 @@ func TestReconcileOnCompletedPipelineRun(t *testing.T) { if actual == nil { t.Errorf("Expected a PipelineRun to be updated, but it wasn't.") } + actions := clients.Pipeline.Actions() + for _, action := range actions { + if action != nil { + resource := action.GetResource().Resource + if resource != "pipelineruns" { + t.Fatalf("Expected client to not have created a TaskRun for the completed PipelineRun, but it did") + } + } + } // Check that the PipelineRun was reconciled correctly reconciledRun, err := clients.Pipeline.Tekton().PipelineRuns("foo").Get("test-pipeline-run-completed", metav1.GetOptions{}) @@ -679,6 +689,55 @@ func TestReconcileWithTimeout(t *testing.T) { t.Errorf("TaskRun timeout %s should be less than or equal to PipelineRun timeout %s", actual.Spec.Timeout.Duration.String(), prs[0].Spec.Timeout.Duration.String()) } } +func TestReconcileCancelledPipelineRun(t *testing.T) { + ps := []*v1alpha1.Pipeline{tb.Pipeline("test-pipeline", "foo", tb.PipelineSpec( + tb.PipelineTask("hello-world-1", "hello-world", tb.Retries(1)), + ))} + prs := []*v1alpha1.PipelineRun{tb.PipelineRun("test-pipeline-run-with-timeout", "foo", + tb.PipelineRunSpec("test-pipeline", + tb.PipelineRunCancelled, + ), + )} + ts := []*v1alpha1.Task{tb.Task("hello-world", "foo")} + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + } + + // create fake recorder for testing + fr := record.NewFakeRecorder(2) + + testAssets := getPipelineRunController(d, fr) + c := testAssets.Controller + clients := testAssets.Clients + + err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-timeout") + if err != nil { + t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err) + } + + // Check that the PipelineRun was reconciled correctly + reconciledRun, err := clients.Pipeline.Tekton().PipelineRuns("foo").Get("test-pipeline-run-with-timeout", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Somehow had error getting completed reconciled run out of fake client: %s", err) + } + + // The PipelineRun should be still cancelled. 
+ if reconciledRun.Status.GetCondition(apis.ConditionSucceeded).Reason != "PipelineRunCancelled" { + t.Errorf("Expected PipelineRun to be cancelled, but condition reason is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) + } + + // Check that no TaskRun is created or run + actions := clients.Pipeline.Actions() + for _, action := range actions { + actionType := fmt.Sprintf("%T", action) + if !(actionType == "testing.UpdateActionImpl" || actionType == "testing.GetActionImpl") { + t.Errorf("Expected a TaskRun to be get/updated, but it was %s", actionType) + } + } +} func TestReconcilePropagateLabels(t *testing.T) { names.TestingSeed() @@ -742,3 +801,102 @@ func TestReconcilePropagateLabels(t *testing.T) { t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRun, d) } } + +func TestReconcileWithTimeoutAndRetry(t *testing.T) { + + tcs := []struct { + name string + retries int + conditionSucceeded corev1.ConditionStatus + }{ + { + name: "One try has to be done", + retries: 1, + conditionSucceeded: corev1.ConditionFalse, + }, + { + name: "No more retries are needed", + retries: 2, + conditionSucceeded: corev1.ConditionUnknown, + }, + } + + for _, tc := range tcs { + + t.Run(tc.name, func(t *testing.T) { + ps := []*v1alpha1.Pipeline{tb.Pipeline("test-pipeline-retry", "foo", tb.PipelineSpec( + tb.PipelineTask("hello-world-1", "hello-world", tb.Retries(tc.retries)), + ))} + prs := []*v1alpha1.PipelineRun{tb.PipelineRun("test-pipeline-retry-run-with-timeout", "foo", + tb.PipelineRunSpec("test-pipeline-retry", + tb.PipelineRunServiceAccount("test-sa"), + tb.PipelineRunTimeout(&metav1.Duration{Duration: 12 * time.Hour}), + ), + tb.PipelineRunStatus( + tb.PipelineRunStartTime(time.Now().AddDate(0, 0, -1))), + )} + + ts := []*v1alpha1.Task{ + tb.Task("hello-world", "foo"), + } + trs := []*v1alpha1.TaskRun{ + tb.TaskRun("hello-world-1", "foo", + tb.TaskRunStatus( + tb.PodName("my-pod-name"), + tb.Condition(apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }), + tb.Retry(v1alpha1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }}, + }, + }), + )), + } + + prtrs := &v1alpha1.PipelineRunTaskRunStatus{ + PipelineTaskName: "hello-world-1", + Status: &trs[0].Status, + } + prs[0].Status.TaskRuns = make(map[string]*v1alpha1.PipelineRunTaskRunStatus) + prs[0].Status.TaskRuns["hello-world-1"] = prtrs + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + TaskRuns: trs, + } + + fr := record.NewFakeRecorder(2) + + testAssets := getPipelineRunController(d, fr) + c := testAssets.Controller + clients := testAssets.Clients + + err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-retry-run-with-timeout") + if err != nil { + t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err) + } + + // Check that the PipelineRun was reconciled correctly + reconciledRun, err := clients.Pipeline.TektonV1alpha1().PipelineRuns("foo").Get("test-pipeline-retry-run-with-timeout", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Somehow had error getting completed reconciled run out of fake client: %s", err) + } + + if len(reconciledRun.Status.TaskRuns["hello-world-1"].Status.RetriesStatus) != tc.retries { + t.Fatalf(" %d retry expected but %d ", tc.retries, len(reconciledRun.Status.TaskRuns["hello-world-1"].Status.RetriesStatus)) + } + + if status := 
reconciledRun.Status.TaskRuns["hello-world-1"].Status.GetCondition(apis.ConditionSucceeded).Status; status != tc.conditionSucceeded { + t.Fatalf("Succedded expected to be %s but is %s", tc.conditionSucceeded, status) + } + + }) + } +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go index c3d587f36b0..90e798b460c 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution.go @@ -62,6 +62,32 @@ type ResolvedPipelineRunTask struct { // state of the PipelineRun. type PipelineRunState []*ResolvedPipelineRunTask +func (t ResolvedPipelineRunTask) IsDone() (isDone bool) { + if t.TaskRun == nil || t.PipelineTask == nil { + return + } + + status := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded) + retriesDone := len(t.TaskRun.Status.RetriesStatus) + retries := t.PipelineTask.Retries + isDone = status.IsTrue() || status.IsFalse() && retriesDone >= retries + return +} + +func (state PipelineRunState) IsDone() (isDone bool) { + isDone = true + for _, t := range state { + if t.TaskRun == nil || t.PipelineTask == nil { + return false + } + isDone = isDone && t.IsDone() + if !isDone { + return + } + } + return +} + // GetNextTasks will return the next ResolvedPipelineRunTasks to execute, which are the ones in the // list of candidateTasks which aren't yet indicated in state to be running. func (state PipelineRunState) GetNextTasks(candidateTasks map[string]v1alpha1.PipelineTask) []*ResolvedPipelineRunTask { @@ -70,6 +96,16 @@ func (state PipelineRunState) GetNextTasks(candidateTasks map[string]v1alpha1.Pi if _, ok := candidateTasks[t.PipelineTask.Name]; ok && t.TaskRun == nil { tasks = append(tasks, t) } + if _, ok := candidateTasks[t.PipelineTask.Name]; ok && t.TaskRun != nil { + status := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded) + if status != nil && status.IsFalse() { + if !(t.TaskRun.IsCancelled() || status.Reason == "TaskRunCancelled") { + if len(t.TaskRun.Status.RetriesStatus) < t.PipelineTask.Retries { + tasks = append(tasks, t) + } + } + } + } } return tasks } @@ -171,6 +207,7 @@ func (e *ResourceNotFoundError) Error() string { func ResolvePipelineRun( pipelineRun v1alpha1.PipelineRun, getTask resources.GetTask, + getTaskRun resources.GetTaskRun, getClusterTask resources.GetClusterTask, getResource resources.GetResource, tasks []v1alpha1.PipelineTask, @@ -214,28 +251,19 @@ func ResolvePipelineRun( } rprt.ResolvedTaskResources = rtr - // Add this task to the state of the PipelineRun - state = append(state, &rprt) - } - return state, nil -} - -// ResolveTaskRuns will go through all tasks in state and check if there are existing TaskRuns -// for each of them by calling getTaskRun. 
-func ResolveTaskRuns(getTaskRun GetTaskRun, state PipelineRunState) error { - for _, rprt := range state { - // Check if we have already started a TaskRun for this task taskRun, err := getTaskRun(rprt.TaskRunName) if err != nil { - // If the TaskRun isn't found, it just means it hasn't been run yet if !errors.IsNotFound(err) { - return fmt.Errorf("error retrieving TaskRun %s: %s", rprt.TaskRunName, err) + return nil, fmt.Errorf("error retrieving TaskRun %s: %s", rprt.TaskRunName, err) } - } else { + } + if taskRun != nil { rprt.TaskRun = taskRun } + // Add this task to the state of the PipelineRun + state = append(state, &rprt) } - return nil + return state, nil } // getTaskRunName should return a unique name for a `TaskRun` if one has not already been defined, and the existing one otherwise. @@ -283,8 +311,8 @@ func GetPipelineConditionStatus(prName string, state PipelineRunState, logger *z } logger.Infof("TaskRun %s status : %v", rprt.TaskRunName, c.Status) // If any TaskRuns have failed, we should halt execution and consider the run failed - if c.Status == corev1.ConditionFalse { - logger.Infof("TaskRun %s has failed, so PipelineRun %s has failed", rprt.TaskRunName, prName) + if c.Status == corev1.ConditionFalse && rprt.IsDone() { + logger.Infof("TaskRun %s has failed, so PipelineRun %s has failed, retries done: %b", rprt.TaskRunName, prName, len(rprt.TaskRun.Status.RetriesStatus)) return &apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution_test.go index 0f429a23bee..18b1f287947 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinerunresolution_test.go @@ -49,6 +49,14 @@ var pts = []v1alpha1.PipelineTask{{ }, { Name: "mytask3", TaskRef: v1alpha1.TaskRef{Name: "clustertask"}, +}, { + Name: "mytask4", + TaskRef: v1alpha1.TaskRef{Name: "task"}, + Retries: 1, +}, { + Name: "mytask5", + TaskRef: v1alpha1.TaskRef{Name: "cancelledTask"}, + Retries: 2, }} var p = &v1alpha1.Pipeline{ @@ -115,6 +123,40 @@ func makeFailed(tr v1alpha1.TaskRun) *v1alpha1.TaskRun { return newTr } +func withCancelled(tr *v1alpha1.TaskRun) *v1alpha1.TaskRun { + tr.Status.Conditions[0].Reason = "TaskRunCancelled" + return tr +} + +func withCancelledBySpec(tr *v1alpha1.TaskRun) *v1alpha1.TaskRun { + tr.Spec.Status = v1alpha1.TaskRunSpecStatusCancelled + return tr +} + +func makeRetried(tr v1alpha1.TaskRun) (newTr *v1alpha1.TaskRun) { + newTr = newTaskRun(tr) + newTr.Status.RetriesStatus = []v1alpha1.TaskRunStatus{{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }}, + }, + }} + return +} +func withRetries(tr *v1alpha1.TaskRun) *v1alpha1.TaskRun { + tr.Status.RetriesStatus = []v1alpha1.TaskRunStatus{{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }}, + }, + }} + return tr +} + func newTaskRun(tr v1alpha1.TaskRun) *v1alpha1.TaskRun { return &v1alpha1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ @@ -206,6 +248,45 @@ var allFinishedState = PipelineRunState{{ }, }} +var oneTaskFailedToRetry = PipelineRunState{{ + PipelineTask: &pts[3], + TaskRunName: "pipelinerun-mytask-failed", + TaskRun: makeFailed(trs[0]), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: 
&task.Spec, + }, +}, { + PipelineTask: &pts[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: nil, + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, +}} +var oneTaskFailedWithRetry = PipelineRunState{{ + PipelineTask: &pts[3], + TaskRunName: "pipelinerun-mytask-failed", + TaskRun: makeRetried(*makeFailed(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, +}, { + PipelineTask: &pts[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: nil, + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, +}} +var taskCancelled = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, +}} + func TestGetNextTasks(t *testing.T) { tcs := []struct { name string @@ -368,7 +449,140 @@ func TestGetNextTasks(t *testing.T) { }, expectedNext: []*ResolvedPipelineRunTask{}, }, + { + name: "one-cancelled-one-candidate", + state: taskCancelled, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[4], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + next := tc.state.GetNextTasks(tc.candidates) + if d := cmp.Diff(next, tc.expectedNext); d != "" { + t.Errorf("Didn't get expected next Tasks: %v", d) + } + }) } +} + +func TestGetNextTaskWithRetries(t *testing.T) { + + var taskCancelledByStatusState = PipelineRunState{{ + PipelineTask: &pts[4], // 2 retries needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskCancelledBySpecState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelledBySpec(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskRunningState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeStarted(trs[0]), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskSucceededState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeSucceeded(trs[0]), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskRetriedState = PipelineRunState{{ + PipelineTask: &pts[3], // 1 retry needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskExpectedState = PipelineRunState{{ + PipelineTask: &pts[4], // 2 retries needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withRetries(makeFailed(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + tcs := []struct { + name string + state PipelineRunState + candidates map[string]v1alpha1.PipelineTask + expectedNext []*ResolvedPipelineRunTask + }{ + { + name: "tasks-cancelled-no-candidates", + state: taskCancelledByStatusState, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[4], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + { + name: "tasks-cancelled-bySpec-no-candidates", + state: taskCancelledBySpecState, + candidates: 
map[string]v1alpha1.PipelineTask{ + "mytask5": pts[4], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + { + name: "tasks-running-no-candidates", + state: taskRunningState, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[4], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + { + name: "tasks-succeeded-bySpec-no-candidates", + state: taskSucceededState, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[4], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + { + name: "tasks-retried-no-candidates", + state: taskRetriedState, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[3], + }, + expectedNext: []*ResolvedPipelineRunTask{}, + }, + { + name: "tasks-retried-one-candidates", + state: taskExpectedState, + candidates: map[string]v1alpha1.PipelineTask{ + "mytask5": pts[3], + }, + expectedNext: []*ResolvedPipelineRunTask{taskExpectedState[0]}, + }, + } + + // iterate over *state* to get from candidate and check if TaskRun is there. + // Cancelled TaskRun should have a TaskRun cancelled and with a retry but should not retry. + for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { next := tc.state.GetNextTasks(tc.candidates) @@ -378,6 +592,153 @@ func TestGetNextTasks(t *testing.T) { }) } } +func TestIsDone(t *testing.T) { + + var taskCancelledByStatusState = PipelineRunState{{ + PipelineTask: &pts[4], // 2 retries needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskCancelledBySpecState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelledBySpec(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskRunningState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeStarted(trs[0]), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskSucceededState = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeSucceeded(trs[0]), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskRetriedState = PipelineRunState{{ + PipelineTask: &pts[3], // 1 retry needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var taskExpectedState = PipelineRunState{{ + PipelineTask: &pts[4], // 2 retries needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withRetries(makeFailed(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var noPipelineTaskState = PipelineRunState{{ + PipelineTask: nil, + TaskRunName: "pipelinerun-mytask1", + TaskRun: withRetries(makeFailed(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + var noTaskRunState = PipelineRunState{{ + PipelineTask: &pts[4], // 2 retries needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: nil, + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + + tcs := []struct { + name string + state PipelineRunState + expected bool + ptExpected []bool + }{ + { + name: "tasks-cancelled-no-candidates", + state: taskCancelledByStatusState, + expected: false, + 
ptExpected: []bool{false}, + }, + { + name: "tasks-cancelled-bySpec-no-candidates", + state: taskCancelledBySpecState, + expected: false, + ptExpected: []bool{false}, + }, + { + name: "tasks-running-no-candidates", + state: taskRunningState, + expected: false, + ptExpected: []bool{false}, + }, + { + name: "tasks-succeeded-bySpec-no-candidates", + state: taskSucceededState, + expected: true, + ptExpected: []bool{true}, + }, + { + name: "tasks-retried-no-candidates", + state: taskRetriedState, + expected: false, + ptExpected: []bool{false}, + }, + { + name: "tasks-retried-one-candidates", + state: taskExpectedState, + expected: false, + ptExpected: []bool{false}, + }, + { + name: "no-pipelineTask", + state: noPipelineTaskState, + expected: false, + ptExpected: []bool{false}, + }, + { + name: "No-taskrun", + state: noTaskRunState, + expected: false, + ptExpected: []bool{false}, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + + isDone := tc.state.IsDone() + if d := cmp.Diff(isDone, tc.expected); d != "" { + t.Errorf("Didn't get expected IsDone: %v", d) + } + for i, pt := range tc.state { + isDone = pt.IsDone() + if d := cmp.Diff(isDone, tc.ptExpected[i]); d != "" { + t.Errorf("Didn't get expected (ResolvedPipelineRunTask) IsDone: %v", d) + } + + } + }) + } +} func TestSuccessfulPipelineTaskNames(t *testing.T) { tcs := []struct { @@ -422,6 +783,16 @@ func TestSuccessfulPipelineTaskNames(t *testing.T) { } func TestGetPipelineConditionStatus(t *testing.T) { + + var taskRetriedState = PipelineRunState{{ + PipelineTask: &pts[3], // 1 retry needed + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelled(makeRetried(trs[0])), + ResolvedTaskResources: &resources.ResolvedTaskResources{ + TaskSpec: &task.Spec, + }, + }} + tcs := []struct { name string state []*ResolvedPipelineRunTask @@ -452,6 +823,11 @@ func TestGetPipelineConditionStatus(t *testing.T) { state: allFinishedState, expectedStatus: corev1.ConditionTrue, }, + { + name: "one-retry-needed", + state: taskRetriedState, + expectedStatus: corev1.ConditionUnknown, + }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { @@ -549,10 +925,11 @@ func TestResolvePipelineRun(t *testing.T) { // The Task "task" doesn't actually take any inputs or outputs, but validating // that is not done as part of Run resolution getTask := func(name string) (v1alpha1.TaskInterface, error) { return task, nil } + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { return nil, nil } getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return nil, nil } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return r, nil } - pipelineState, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, p.Spec.Tasks, providedResources) + pipelineState, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, p.Spec.Tasks, providedResources) if err != nil { t.Fatalf("Error getting tasks for fake pipeline %s: %s", p.ObjectMeta.Name, err) } @@ -613,6 +990,7 @@ func TestResolvePipelineRun_PipelineTaskHasNoResources(t *testing.T) { providedResources := map[string]v1alpha1.PipelineResourceRef{} getTask := func(name string) (v1alpha1.TaskInterface, error) { return task, nil } + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { return &trs[0], nil } getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return clustertask, nil } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return nil, fmt.Errorf("should not get 
called") } pr := v1alpha1.PipelineRun{ @@ -620,7 +998,7 @@ func TestResolvePipelineRun_PipelineTaskHasNoResources(t *testing.T) { Name: "pipelinerun", }, } - pipelineState, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, pts, providedResources) + pipelineState, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, pts, providedResources) if err != nil { t.Fatalf("Did not expect error when resolving PipelineRun without Resources: %v", err) } @@ -655,13 +1033,17 @@ func TestResolvePipelineRun_TaskDoesntExist(t *testing.T) { getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return nil, errors.NewNotFound(v1alpha1.Resource("clustertask"), name) } + + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { + return nil, errors.NewNotFound(v1alpha1.Resource("taskrun"), name) + } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return nil, fmt.Errorf("should not get called") } pr := v1alpha1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", }, } - _, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, pts, providedResources) + _, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, pts, providedResources) switch err := err.(type) { case nil: t.Fatalf("Expected error getting non-existent Tasks for Pipeline %s but got none", p.Name) @@ -697,6 +1079,7 @@ func TestResolvePipelineRun_ResourceBindingsDontExist(t *testing.T) { providedResources := map[string]v1alpha1.PipelineResourceRef{} getTask := func(name string) (v1alpha1.TaskInterface, error) { return task, nil } + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { return &trs[0], nil } getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return clustertask, nil } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return nil, fmt.Errorf("shouldnt be called") } @@ -707,7 +1090,7 @@ func TestResolvePipelineRun_ResourceBindingsDontExist(t *testing.T) { Name: "pipelinerun", }, } - _, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, tt.p.Spec.Tasks, providedResources) + _, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, tt.p.Spec.Tasks, providedResources) if err == nil { t.Fatalf("Expected error when bindings are in incorrect state for Pipeline %s but got none", p.Name) } @@ -744,6 +1127,7 @@ func TestResolvePipelineRun_ResourcesDontExist(t *testing.T) { } getTask := func(name string) (v1alpha1.TaskInterface, error) { return task, nil } + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { return &trs[0], nil } getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return clustertask, nil } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return nil, errors.NewNotFound(v1alpha1.Resource("pipelineresource"), name) @@ -756,7 +1140,7 @@ func TestResolvePipelineRun_ResourcesDontExist(t *testing.T) { Name: "pipelinerun", }, } - _, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, tt.p.Spec.Tasks, providedResources) + _, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, tt.p.Spec.Tasks, providedResources) switch err := err.(type) { case nil: t.Fatalf("Expected error getting non-existent Resources for Pipeline %s but got none", p.Name) @@ -769,92 +1153,6 @@ func TestResolvePipelineRun_ResourcesDontExist(t *testing.T) { } } -func TestResolveTaskRuns_AllStarted(t *testing.T) { - state := []*ResolvedPipelineRunTask{{ - 
TaskRunName: "pipelinerun-mytask1", - }, { - TaskRunName: "pipelinerun-mytask2", - }} - getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { - if name == "pipelinerun-mytask1" { - return &trs[0], nil - } - if name == "pipelinerun-mytask2" { - return &trs[1], nil - } - return nil, errors.NewNotFound(v1alpha1.Resource("taskrun"), name) - } - err := ResolveTaskRuns(getTaskRun, state) - if err != nil { - t.Fatalf("Didn't expect error resolving taskruns but got %v", err) - } - if state[0].TaskRun != &trs[0] { - t.Errorf("Expected first task to resolve to first taskrun but was %v", state[0].TaskRun) - } - if state[1].TaskRun != &trs[1] { - t.Errorf("Expected second task to resolve to second taskrun but was %v", state[1].TaskRun) - } -} - -func TestResolveTaskRuns_SomeStarted(t *testing.T) { - state := []*ResolvedPipelineRunTask{{ - TaskRunName: "pipelinerun-mytask1", - }, { - TaskRunName: "pipelinerun-mytask2", - }} - getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { - // only the first has started - if name == "pipelinerun-mytask1" { - return &trs[0], nil - } - return nil, errors.NewNotFound(v1alpha1.Resource("taskrun"), name) - } - err := ResolveTaskRuns(getTaskRun, state) - if err != nil { - t.Fatalf("Didn't expect error resolving taskruns but got %v", err) - } - if state[0].TaskRun != &trs[0] { - t.Errorf("Expected first task to resolve to first taskrun but was %v", state[0].TaskRun) - } - if state[1].TaskRun != nil { - t.Errorf("Expected second task to not resolve but was %v", state[1].TaskRun) - } -} - -func TestResolveTaskRuns_NoneStarted(t *testing.T) { - state := []*ResolvedPipelineRunTask{{ - TaskRunName: "pipelinerun-mytask1", - }, { - TaskRunName: "pipelinerun-mytask2", - }} - getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { - return nil, errors.NewNotFound(v1alpha1.Resource("taskrun"), name) - } - err := ResolveTaskRuns(getTaskRun, state) - if err != nil { - t.Fatalf("Didn't expect error resolving taskruns but got %v", err) - } - if state[0].TaskRun != nil { - t.Errorf("Expected first task to not resolve but was %v", state[0].TaskRun) - } - if state[1].TaskRun != nil { - t.Errorf("Expected second task to not resolve but was %v", state[1].TaskRun) - } -} - -func TestResolveTaskRuns_Error(t *testing.T) { - state := []*ResolvedPipelineRunTask{{ - TaskRunName: "pipelinerun-mytask1", - }} - getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { - return nil, fmt.Errorf("something has gone wrong") - } - err := ResolveTaskRuns(getTaskRun, state) - if err == nil { - t.Fatalf("Expected to get an error when unable to resolve task runs") - } -} - func TestValidateFrom(t *testing.T) { r := tb.PipelineResource("holygrail", namespace, tb.PipelineResourceSpec(v1alpha1.PipelineResourceTypeImage)) state := []*ResolvedPipelineRunTask{{ @@ -1072,9 +1370,10 @@ func TestResolvePipelineRun_withExistingTaskRuns(t *testing.T) { // that is not done as part of Run resolution getTask := func(name string) (v1alpha1.TaskInterface, error) { return task, nil } getClusterTask := func(name string) (v1alpha1.TaskInterface, error) { return nil, nil } + getTaskRun := func(name string) (*v1alpha1.TaskRun, error) { return nil, nil } getResource := func(name string) (*v1alpha1.PipelineResource, error) { return r, nil } - pipelineState, err := ResolvePipelineRun(pr, getTask, getClusterTask, getResource, p.Spec.Tasks, providedResources) + pipelineState, err := ResolvePipelineRun(pr, getTask, getTaskRun, getClusterTask, getResource, p.Spec.Tasks, providedResources) if err != nil { 
t.Fatalf("Error getting tasks for fake pipeline %s: %s", p.ObjectMeta.Name, err) } diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go b/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go index 5435a6c15dc..33cb8a2ad8d 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/taskspec.go @@ -25,6 +25,7 @@ import ( // GetTask is a function used to retrieve Tasks. type GetTask func(string) (v1alpha1.TaskInterface, error) +type GetTaskRun func(string) (*v1alpha1.TaskRun, error) // GetClusterTask is a function that will retrieve the Task from name and namespace. type GetClusterTask func(name string) (v1alpha1.TaskInterface, error) diff --git a/test/builder/pipeline.go b/test/builder/pipeline.go index c9a613c2509..668d3314674 100644 --- a/test/builder/pipeline.go +++ b/test/builder/pipeline.go @@ -146,6 +146,12 @@ func PipelineTask(name, taskName string, ops ...PipelineTaskOp) PipelineSpecOp { } } +func Retries(retries int) PipelineTaskOp { + return func(pt *v1alpha1.PipelineTask) { + pt.Retries = retries + } +} + // RunAfter will update the provided Pipeline Task to indicate that it // should be run after the provided list of Pipeline Task names. func RunAfter(tasks ...string) PipelineTaskOp { diff --git a/test/builder/task.go b/test/builder/task.go index d3c8a59a367..44eea93f534 100644 --- a/test/builder/task.go +++ b/test/builder/task.go @@ -303,6 +303,12 @@ func Condition(condition apis.Condition) TaskRunStatusOp { } } +func Retry(retry v1alpha1.TaskRunStatus) TaskRunStatusOp { + return func(s *v1alpha1.TaskRunStatus) { + s.RetriesStatus = append(s.RetriesStatus, retry) + } +} + // StepState adds a StepState to the TaskRunStatus. func StepState(ops ...StepStateOp) TaskRunStatusOp { return func(s *v1alpha1.TaskRunStatus) { diff --git a/test/cancel_test.go b/test/cancel_test.go index 714a852b530..fb2464cb0da 100644 --- a/test/cancel_test.go +++ b/test/cancel_test.go @@ -32,127 +32,154 @@ import ( // verify that pipelinerun cancel lead to the the correct TaskRun statuses // and pod deletions. 
func TestTaskRunPipelineRunCancel(t *testing.T) { - c, namespace := setup(t) + type tests struct { + name string + retries bool + } + + tds := []tests{ + { + name: "With retries", + retries: true, + }, { + name: "No retries", + retries: false, + }, + } + t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + for _, tdd := range tds { + t.Run(tdd.name, func(t *testing.T) { - t.Logf("Creating Task in namespace %s", namespace) - task := tb.Task("banana", namespace, tb.TaskSpec( - tb.Step("foo", "ubuntu", tb.Command("/bin/bash"), tb.Args("-c", "sleep 5000")), - )) - if _, err := c.TaskClient.Create(task); err != nil { - t.Fatalf("Failed to create Task `banana`: %s", err) - } + var pipelineTask = tb.PipelineTask("foo", "banana") + if tdd.retries { + pipelineTask = tb.PipelineTask("foo", "banana", tb.Retries(1)) + } - t.Logf("Creating Pipeline in namespace %s", namespace) - pipeline := tb.Pipeline("tomatoes", namespace, - tb.PipelineSpec(tb.PipelineTask("foo", "banana")), - ) - if _, err := c.PipelineClient.Create(pipeline); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) - } + c, namespace := setup(t) + t.Parallel() - pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name)) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) - t.Logf("Creating PipelineRun in namespace %s", namespace) - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { - t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) - } + t.Logf("Creating Task in namespace %s", namespace) + task := tb.Task("banana", namespace, tb.TaskSpec( + tb.Step("foo", "ubuntu", tb.Command("/bin/bash"), tb.Args("-c", "sleep 5000")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `banana`: %s", err) + } - t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", "pear", namespace) - if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, func(pr *v1alpha1.PipelineRun) (bool, error) { - c := pr.Status.GetCondition(apis.ConditionSucceeded) - if c != nil { - if c.Status == corev1.ConditionTrue || c.Status == corev1.ConditionFalse { - return true, fmt.Errorf("pipelineRun %s already finished", "pear") - } else if c.Status == corev1.ConditionUnknown && (c.Reason == "Running" || c.Reason == "Pending") { - return true, nil + t.Logf("Creating Pipeline in namespace %s", namespace) + pipeline := tb.Pipeline("tomatoes", namespace, + tb.PipelineSpec(pipelineTask), + ) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) } - } - return false, nil - }, "PipelineRunRunning"); err != nil { - t.Fatalf("Error waiting for PipelineRun %s to be running: %s", "pear", err) - } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pear"}) - if err != nil { - t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", "pear", err) - } + pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name)) - var wg sync.WaitGroup - t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", "pear", namespace) - for _, taskrunItem := range taskrunList.Items { - wg.Add(1) - go func(name string) { - defer wg.Done() - err := WaitForTaskRunState(c, name, func(tr *v1alpha1.TaskRun) (bool, error) { - if c := 
tr.Status.GetCondition(apis.ConditionSucceeded); c != nil { - if c.IsTrue() || c.IsFalse() { - return true, fmt.Errorf("taskRun %s already finished!", name) - } else if c.IsUnknown() && (c.Reason == "Running" || c.Reason == "Pending") { + t.Logf("Creating PipelineRun in namespace %s", namespace) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) + } + + t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", "pear", namespace) + if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, func(pr *v1alpha1.PipelineRun) (bool, error) { + c := pr.Status.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue || c.Status == corev1.ConditionFalse { + return true, fmt.Errorf("pipelineRun %s already finished", "pear") + } else if c.Status == corev1.ConditionUnknown && (c.Reason == "Running" || c.Reason == "Pending") { return true, nil } } return false, nil - }, "TaskRunRunning") + }, "PipelineRunRunning"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to be running: %s", "pear", err) + } + + taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pear"}) if err != nil { - t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err) + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", "pear", err) } - }(taskrunItem.Name) - } - wg.Wait() - pr, err := c.PipelineRunClient.Get("pear", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get PipelineRun `%s`: %s", "pear", err) - } + var wg sync.WaitGroup + t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", "pear", namespace) + for _, taskrunItem := range taskrunList.Items { + wg.Add(1) + go func(name string) { + defer wg.Done() + err := WaitForTaskRunState(c, name, func(tr *v1alpha1.TaskRun) (bool, error) { + if c := tr.Status.GetCondition(apis.ConditionSucceeded); c != nil { + if c.IsTrue() || c.IsFalse() { + return true, fmt.Errorf("taskRun %s already finished!", name) + } else if c.IsUnknown() && (c.Reason == "Running" || c.Reason == "Pending") { + return true, nil + } + } + return false, nil + }, "TaskRunRunning") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err) + } + }(taskrunItem.Name) + } + wg.Wait() - pr.Spec.Status = v1alpha1.PipelineRunSpecStatusCancelled - if _, err := c.PipelineRunClient.Update(pr); err != nil { - t.Fatalf("Failed to cancel PipelineRun `%s`: %s", "pear", err) - } + pr, err := c.PipelineRunClient.Get("pear", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get PipelineRun `%s`: %s", "pear", err) + } - t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", "pear", namespace) - if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, func(pr *v1alpha1.PipelineRun) (bool, error) { - if c := pr.Status.GetCondition(apis.ConditionSucceeded); c != nil { - if c.IsFalse() { - if c.Reason == "PipelineRunCancelled" { - return true, nil - } - return true, fmt.Errorf("pipelineRun %s completed with the wrong reason: %s", "pear", c.Reason) - } else if c.IsTrue() { - return true, fmt.Errorf("pipelineRun %s completed successfully, should have been cancelled", "pear") + pr.Spec.Status = v1alpha1.PipelineRunSpecStatusCancelled + if _, err := c.PipelineRunClient.Update(pr); err != nil { + t.Fatalf("Failed to cancel PipelineRun `%s`: %s", "pear", err) } - } - return false, nil - }, "PipelineRunCancelled"); err != nil { - 
t.Errorf("Error waiting for PipelineRun `pear` to finished: %s", err) - } - t.Logf("Waiting for TaskRuns in PipelineRun %s in namespace %s to be cancelled", "pear", namespace) - for _, taskrunItem := range taskrunList.Items { - wg.Add(1) - go func(name string) { - defer wg.Done() - err := WaitForTaskRunState(c, name, func(tr *v1alpha1.TaskRun) (bool, error) { - if c := tr.Status.GetCondition(apis.ConditionSucceeded); c != nil { + t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", "pear", namespace) + if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, func(pr *v1alpha1.PipelineRun) (bool, error) { + if c := pr.Status.GetCondition(apis.ConditionSucceeded); c != nil { if c.IsFalse() { - if c.Reason == "TaskRunCancelled" { + if c.Reason == "PipelineRunCancelled" { return true, nil } - return true, fmt.Errorf("taskRun %s completed with the wrong reason: %s", name, c.Reason) + return true, fmt.Errorf("pipelineRun %s completed with the wrong reason: %s", "pear", c.Reason) } else if c.IsTrue() { - return true, fmt.Errorf("taskRun %s completed successfully, should have been cancelled", name) + return true, fmt.Errorf("pipelineRun %s completed successfully, should have been cancelled", "pear") } } return false, nil - }, "TaskRunCancelled") - if err != nil { - t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err) + }, "PipelineRunCancelled"); err != nil { + t.Errorf("Error waiting for PipelineRun `pear` to finished: %s", err) + } + + t.Logf("Waiting for TaskRuns in PipelineRun %s in namespace %s to be cancelled", "pear", namespace) + for _, taskrunItem := range taskrunList.Items { + wg.Add(1) + go func(name string) { + defer wg.Done() + err := WaitForTaskRunState(c, name, func(tr *v1alpha1.TaskRun) (bool, error) { + if c := tr.Status.GetCondition(apis.ConditionSucceeded); c != nil { + if c.IsFalse() { + if c.Reason == "TaskRunCancelled" { + return true, nil + } + return true, fmt.Errorf("taskRun %s completed with the wrong reason: %s", name, c.Reason) + } else if c.IsTrue() { + return true, fmt.Errorf("taskRun %s completed successfully, should have been cancelled", name) + } + } + return false, nil + }, "TaskRunCancelled") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err) + } + }(taskrunItem.Name) } - }(taskrunItem.Name) + wg.Wait() + }) } - wg.Wait() } diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index eee521fcc51..bdeaa857d67 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -410,11 +410,15 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR } assertLabelsMatch(t, labels, tr.ObjectMeta.Labels) - // Check label propagation to Pods. - pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + // PodName is "" iff a retry happened and pod is deleted // This label is added to every Pod by the TaskRun controller - labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name - assertLabelsMatch(t, labels, pod.ObjectMeta.Labels) + if tr.Status.PodName != "" { + // Check label propagation to Pods. 
+		pod := getPodForTaskRun(t, c.KubeClient, namespace, tr)
+		// This label is added to every Pod by the TaskRun controller
+		labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name
+		assertLabelsMatch(t, labels, pod.ObjectMeta.Labels)
+	}
 }
 
 func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod {
diff --git a/test/timeout_test.go b/test/timeout_test.go
index c7ccb1ad068..a8474b61df9 100644
--- a/test/timeout_test.go
+++ b/test/timeout_test.go
@@ -178,6 +178,58 @@ func TestPipelineRunTimeout(t *testing.T) {
 	}
 }
 
+func TestPipelineRunFailedAndRetry(t *testing.T) {
+	numberOfRetries := 2
+	c, namespace := setup(t)
+	t.Parallel()
+
+	knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
+	defer tearDown(t, c, namespace)
+
+	t.Logf("Creating Task in namespace %s", namespace)
+	task := tb.Task("banana", namespace, tb.TaskSpec(
+		tb.Step("foo", "busybox", tb.Command("/bin/sh"), tb.Args("-c", "exit 1")),
+	))
+	if _, err := c.TaskClient.Create(task); err != nil {
+		t.Fatalf("Failed to create Task `%s`: %s", "banana", err)
+	}
+
+	pipeline := tb.Pipeline("tomatoes", namespace,
+		tb.PipelineSpec(tb.PipelineTask("foo", "banana", tb.Retries(numberOfRetries))),
+	)
+	pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name))
+	if _, err := c.PipelineClient.Create(pipeline); err != nil {
+		t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err)
+	}
+	if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil {
+		t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err)
+	}
+
+	t.Logf("Waiting for PipelineRun %s in namespace %s to fail", pipelineRun.Name, namespace)
+	if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, PipelineRunFailed(pipelineRun.Name), "PipelineRunFailed"); err != nil {
+		t.Fatalf("Error waiting for PipelineRun %s to be failed: %s", pipelineRun.Name, err)
+	}
+
+	r, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Error getting PipelineRun %s: %s", pipelineRun.Name, err)
+	}
+
+	if len(r.Status.TaskRuns) != 1 {
+		t.Fatalf("Only one TaskRun is expected, but got %d", len(r.Status.TaskRuns))
+	}
+
+	for taskRunName := range r.Status.TaskRuns {
+		taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{})
+		if err != nil {
+			t.Fatalf("Error getting TaskRun %s: %s", taskRunName, err)
+		}
+		if len(taskrun.Status.RetriesStatus) != numberOfRetries {
+			t.Fatalf("Expected %d retries, but got %d", numberOfRetries, len(taskrun.Status.RetriesStatus))
+		}
+	}
+}
+
 // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out.
 func TestTaskRunTimeout(t *testing.T) {
 	c, namespace := setup(t)
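As a companion to the integration test above, this is roughly the status shape a retried TaskRun ends up with: each failed attempt is copied into `retriesStatus` (with its own nested history omitted), while the top-level condition reflects the attempt currently running. A hedged sketch built from the types introduced in this change; the concrete values and import paths are assumptions for illustration.

```go
package main

import (
	"fmt"

	"github.com/knative/pkg/apis"
	duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A TaskRun after one failure and one retry: the failed attempt lives in
	// RetriesStatus, and the live condition reports the attempt in progress.
	status := v1alpha1.TaskRunStatus{
		Status: duckv1beta1.Status{
			Conditions: []apis.Condition{{
				Type:   apis.ConditionSucceeded,
				Status: corev1.ConditionUnknown, // retried attempt still running
			}},
		},
		RetriesStatus: []v1alpha1.TaskRunStatus{{
			Status: duckv1beta1.Status{
				Conditions: []apis.Condition{{
					Type:   apis.ConditionSucceeded,
					Status: corev1.ConditionFalse, // the first, failed attempt
				}},
			},
		}},
	}
	fmt.Printf("retries recorded: %d\n", len(status.RetriesStatus)) // 1
}
```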