diff --git a/pkg/artifacts/artifact_storage_test.go b/pkg/artifacts/artifact_storage_test.go
index 76e4a5b8cc3..3113c06514b 100644
--- a/pkg/artifacts/artifact_storage_test.go
+++ b/pkg/artifacts/artifact_storage_test.go
@@ -296,7 +296,7 @@ func TestInitializeArtifactStorage(t *testing.T) {
 			}
 			// If the expected storage type is PVC, make sure we're actually creating that PVC.
 			if c.storagetype == "pvc" {
-				_, err := fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+				_, err := fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
 				if err != nil {
 					t.Fatalf("Error getting expected PVC %s for PipelineRun %s: %s", GetPVCName(pipelinerun), pipelinerun.Name, err)
 				}
@@ -456,14 +456,14 @@ func TestCleanupArtifactStorage(t *testing.T) {
 				ArtifactBucket: ab,
 			}
 			ctx := config.ToContext(context.Background(), &configs)
-			_, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+			_, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("Error getting expected PVC %s for PipelineRun %s: %s", GetPVCName(pipelinerun), pipelinerun.Name, err)
 			}
 			if err := CleanupArtifactStorage(ctx, pipelinerun, fakekubeclient); err != nil {
 				t.Fatalf("Error cleaning up artifact storage: %s", err)
 			}
-			_, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+			_, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
 			if err == nil {
 				t.Fatalf("Found PVC %s for PipelineRun %s after it should have been cleaned up", GetPVCName(pipelinerun), pipelinerun.Name)
 			} else if !errors.IsNotFound(err) {
diff --git a/pkg/artifacts/artifacts_storage.go b/pkg/artifacts/artifacts_storage.go
index 55f0017167a..99b676bdb15 100644
--- a/pkg/artifacts/artifacts_storage.go
+++ b/pkg/artifacts/artifacts_storage.go
@@ -120,7 +120,7 @@ func InitializeArtifactStorage(ctx context.Context, images pipeline.Images, pr *
 
 func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
 	if NeedsPVC(ctx) {
-		err := deletePVC(pr, c)
+		err := deletePVC(ctx, pr, c)
 		if err != nil {
 			return err
 		}
@@ -171,7 +171,7 @@ func NewArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *s
 }
 
 func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) (*corev1.PersistentVolumeClaim, error) {
-	if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(GetPVCName(pr), metav1.GetOptions{}); err != nil {
+	if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(ctx, GetPVCName(pr), metav1.GetOptions{}); err != nil {
 		if errors.IsNotFound(err) {
 			pvcConfig := config.FromContextOrDefaults(ctx).ArtifactPVC
 			pvcSize, err := resource.ParseQuantity(pvcConfig.Size)
@@ -192,7 +192,7 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf
 			}
 			pvcSpec := GetPVCSpec(pr, pvcSize, pvcStorageClassName)
-			pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvcSpec)
+			pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(ctx, pvcSpec, metav1.CreateOptions{})
 			if err != nil {
 				return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %w", pr.Name, err)
 			}
@@ -203,12 +203,12 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf
 	return nil, nil
 }
 
-func deletePVC(pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
-	if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(GetPVCName(pr), metav1.GetOptions{}); err != nil {
+func deletePVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
+	if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(ctx, GetPVCName(pr), metav1.GetOptions{}); err != nil {
 		if !errors.IsNotFound(err) {
 			return fmt.Errorf("failed to get Persistent Volume %q due to error: %w", GetPVCName(pr), err)
 		}
-	} else if err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Delete(GetPVCName(pr), &metav1.DeleteOptions{}); err != nil {
+	} else if err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Delete(ctx, GetPVCName(pr), metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("failed to delete Persistent Volume %q due to error: %w", pr.Name, err)
 	}
 	return nil
diff --git a/pkg/pod/creds_init.go b/pkg/pod/creds_init.go
index 15256247118..745227c4bb3 100644
--- a/pkg/pod/creds_init.go
+++ b/pkg/pod/creds_init.go
@@ -54,7 +54,7 @@ func credsInit(ctx context.Context, serviceAccountName, namespace string, kubecl
 		serviceAccountName = config.DefaultServiceAccountValue
 	}
 
-	sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, metav1.GetOptions{})
+	sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(ctx, serviceAccountName, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -65,7 +65,7 @@ func credsInit(ctx context.Context, serviceAccountName, namespace string, kubecl
 	var volumes []corev1.Volume
 	args := []string{}
 	for _, secretEntry := range sa.Secrets {
-		secret, err := kubeclient.CoreV1().Secrets(namespace).Get(secretEntry.Name, metav1.GetOptions{})
+		secret, err := kubeclient.CoreV1().Secrets(namespace).Get(ctx, secretEntry.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, nil, nil, err
 		}
diff --git a/pkg/pod/entrypoint.go b/pkg/pod/entrypoint.go
index 033ec03c6f7..74685a86171 100644
--- a/pkg/pod/entrypoint.go
+++ b/pkg/pod/entrypoint.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"path/filepath"
@@ -162,8 +163,8 @@ func collectResultsName(results []v1beta1.TaskResult) string {
 
 // UpdateReady updates the Pod's annotations to signal the first step to start
 // by projecting the ready annotation via the Downward API.
-func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {
-	newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+func UpdateReady(ctx context.Context, kubeclient kubernetes.Interface, pod corev1.Pod) error {
+	newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("error getting Pod %q when updating ready annotation: %w", pod.Name, err)
 	}
@@ -175,7 +176,7 @@ func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {
 	}
 	if newPod.ObjectMeta.Annotations[readyAnnotation] != readyAnnotationValue {
 		newPod.ObjectMeta.Annotations[readyAnnotation] = readyAnnotationValue
-		if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil {
+		if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(ctx, newPod, metav1.UpdateOptions{}); err != nil {
 			return fmt.Errorf("error adding ready annotation to Pod %q: %w", pod.Name, err)
 		}
 	}
@@ -184,8 +185,8 @@ func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {
 
 // StopSidecars updates sidecar containers in the Pod to a nop image, which
 // exits successfully immediately.
-func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.Pod) error {
-	newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+func StopSidecars(ctx context.Context, nopImage string, kubeclient kubernetes.Interface, pod corev1.Pod) error {
+	newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("error getting Pod %q when stopping sidecars: %w", pod.Name, err)
 	}
@@ -208,7 +209,7 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P
 		}
 	}
 	if updated {
-		if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil {
+		if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(ctx, newPod, metav1.UpdateOptions{}); err != nil {
 			return fmt.Errorf("error stopping sidecars of Pod %q: %w", pod.Name, err)
 		}
 	}
diff --git a/pkg/pod/entrypoint_lookup.go b/pkg/pod/entrypoint_lookup.go
index 660000d4d3d..bfee539fc89 100644
--- a/pkg/pod/entrypoint_lookup.go
+++ b/pkg/pod/entrypoint_lookup.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/google/go-containerregistry/pkg/name"
@@ -30,7 +31,7 @@ type EntrypointCache interface {
 	// Get the Image data for the given image reference. If the value is
 	// not found in the cache, it will be fetched from the image registry,
 	// possibly using K8s service account imagePullSecrets.
-	Get(ref name.Reference, namespace, serviceAccountName string) (v1.Image, error)
+	Get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string) (v1.Image, error)
 	// Update the cache with a new digest->Image mapping. This will avoid a
 	// remote registry lookup next time Get is called.
 	Set(digest name.Digest, img v1.Image)
@@ -41,7 +42,7 @@ type EntrypointCache interface {
 
 // Images that are not specified by digest will be specified by digest after
 // lookup in the resulting list of containers.
-func resolveEntrypoints(cache EntrypointCache, namespace, serviceAccountName string, steps []corev1.Container) ([]corev1.Container, error) {
+func resolveEntrypoints(ctx context.Context, cache EntrypointCache, namespace, serviceAccountName string, steps []corev1.Container) ([]corev1.Container, error) {
 	// Keep a local cache of name->image lookups, just for the scope of
 	// resolving this set of steps. If the image is pushed to before the
 	// next run, we need to resolve its digest and entrypoint again, but we
@@ -63,7 +64,7 @@ func resolveEntrypoints(cache EntrypointCache, namespace, serviceAccountName str
 		} else {
 			// Look it up in the cache. If it's not found in the
 			// cache, it will be resolved from the registry.
-			img, err = cache.Get(origRef, namespace, serviceAccountName)
+			img, err = cache.Get(ctx, origRef, namespace, serviceAccountName)
 			if err != nil {
 				return nil, err
 			}
diff --git a/pkg/pod/entrypoint_lookup_impl.go b/pkg/pod/entrypoint_lookup_impl.go
index ca85e91f89b..a544d9e6de0 100644
--- a/pkg/pod/entrypoint_lookup_impl.go
+++ b/pkg/pod/entrypoint_lookup_impl.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/google/go-containerregistry/pkg/authn"
@@ -48,7 +49,7 @@ func NewEntrypointCache(kubeclient kubernetes.Interface) (EntrypointCache, error
 	}, nil
 }
 
-func (e *entrypointCache) Get(ref name.Reference, namespace, serviceAccountName string) (v1.Image, error) {
+func (e *entrypointCache) Get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string) (v1.Image, error) {
 	// If image is specified by digest, check the local cache.
 	if digest, ok := ref.(name.Digest); ok {
 		if img, ok := e.lru.Get(digest.String()); ok {
@@ -59,7 +60,7 @@ func (e *entrypointCache) Get(ref name.Reference, namespace, serviceAccountName
 	// If the image wasn't specified by digest, or if the entrypoint
 	// wasn't found, we have to consult the remote registry, using
 	// imagePullSecrets.
-	kc, err := k8schain.New(e.kubeclient, k8schain.Options{
+	kc, err := k8schain.New(ctx, e.kubeclient, k8schain.Options{
 		Namespace:          namespace,
 		ServiceAccountName: serviceAccountName,
 	})
diff --git a/pkg/pod/entrypoint_lookup_test.go b/pkg/pod/entrypoint_lookup_test.go
index aeb9468077f..3abc1437894 100644
--- a/pkg/pod/entrypoint_lookup_test.go
+++ b/pkg/pod/entrypoint_lookup_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"fmt"
 	"testing"
 
@@ -30,6 +31,10 @@ import (
 )
 
 func TestResolveEntrypoints(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
 	// Generate a random image with entrypoint configured.
 	img, err := random.Image(1, 1)
 	if err != nil {
@@ -53,7 +58,7 @@ func TestResolveEntrypoints(t *testing.T) {
 		"gcr.io/my/image:latest": &data{img: img},
 	}
 
-	got, err := resolveEntrypoints(cache, "namespace", "serviceAccountName", []corev1.Container{{
+	got, err := resolveEntrypoints(ctx, cache, "namespace", "serviceAccountName", []corev1.Container{{
 		// This step specifies its command, so there's nothing to
 		// resolve.
 		Image:   "fully-specified",
@@ -110,7 +115,7 @@ type data struct {
 	seen bool // Whether the image has been looked up before.
 }
 
-func (f fakeCache) Get(ref name.Reference, _, _ string) (v1.Image, error) {
+func (f fakeCache) Get(ctx context.Context, ref name.Reference, _, _ string) (v1.Image, error) {
 	if d, ok := ref.(name.Digest); ok {
 		if data, found := f[d.String()]; found {
 			return data.img, nil
diff --git a/pkg/pod/entrypoint_test.go b/pkg/pod/entrypoint_test.go
index 2f560465913..4b9beccd8fb 100644
--- a/pkg/pod/entrypoint_test.go
+++ b/pkg/pod/entrypoint_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -296,12 +297,15 @@ func TestUpdateReady(t *testing.T) {
 		},
 	}} {
 		t.Run(c.desc, func(t *testing.T) {
+			ctx := context.Background()
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
 			kubeclient := fakek8s.NewSimpleClientset(&c.pod)
-			if err := UpdateReady(kubeclient, c.pod); err != nil {
+			if err := UpdateReady(ctx, kubeclient, c.pod); err != nil {
 				t.Errorf("UpdateReady: %v", err)
 			}
 
-			got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(c.pod.Name, metav1.GetOptions{})
+			got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(ctx, c.pod.Name, metav1.GetOptions{})
 			if err != nil {
 				t.Errorf("Getting pod %q after update: %v", c.pod.Name, err)
 			} else if d := cmp.Diff(c.wantAnnotations, got.Annotations); d != "" {
@@ -411,12 +415,15 @@ func TestStopSidecars(t *testing.T) {
 		wantContainers: []corev1.Container{stepContainer, sidecarContainer, injectedSidecar},
 	}} {
 		t.Run(c.desc, func(t *testing.T) {
+			ctx := context.Background()
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
 			kubeclient := fakek8s.NewSimpleClientset(&c.pod)
-			if err := StopSidecars(nopImage, kubeclient, c.pod); err != nil {
+			if err := StopSidecars(ctx, nopImage, kubeclient, c.pod); err != nil {
 				t.Errorf("error stopping sidecar: %v", err)
 			}
 
-			got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(c.pod.Name, metav1.GetOptions{})
+			got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(ctx, c.pod.Name, metav1.GetOptions{})
 			if err != nil {
 				t.Errorf("Getting pod %q after update: %v", c.pod.Name, err)
 			} else if d := cmp.Diff(c.wantContainers, got.Spec.Containers); d != "" {
diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go
index bf22d200bfb..e038f9034c2 100644
--- a/pkg/pod/pod.go
+++ b/pkg/pod/pod.go
@@ -134,7 +134,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	}
 
 	// Resolve entrypoint for any steps that don't specify command.
-	stepContainers, err = resolveEntrypoints(b.EntrypointCache, taskRun.Namespace, taskRun.Spec.ServiceAccountName, stepContainers)
+	stepContainers, err = resolveEntrypoints(ctx, b.EntrypointCache, taskRun.Namespace, taskRun.Spec.ServiceAccountName, stepContainers)
 	if err != nil {
 		return nil, err
 	}
@@ -148,7 +148,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	initContainers = append(initContainers, entrypointInit)
 	volumes = append(volumes, toolsVolume, downwardVolume)
 
-	limitRangeMin, err := getLimitRangeMinimum(taskRun.Namespace, b.KubeClient)
+	limitRangeMin, err := getLimitRangeMinimum(ctx, taskRun.Namespace, b.KubeClient)
 	if err != nil {
 		return nil, err
 	}
@@ -337,8 +337,8 @@ func nodeAffinityUsingAffinityAssistant(affinityAssistantName string) *corev1.Af
 // https://github.com/kubernetes/kubernetes/issues/79496, the
 // max LimitRange minimum must be found in the event of conflicting
 // container minimums specified.
-func getLimitRangeMinimum(namespace string, kubeclient kubernetes.Interface) (corev1.ResourceList, error) {
-	limitRanges, err := kubeclient.CoreV1().LimitRanges(namespace).List(metav1.ListOptions{})
+func getLimitRangeMinimum(ctx context.Context, namespace string, kubeclient kubernetes.Interface) (corev1.ResourceList, error) {
+	limitRanges, err := kubeclient.CoreV1().LimitRanges(namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/reconciler/pipelinerun/affinity_assistant.go b/pkg/reconciler/pipelinerun/affinity_assistant.go
index 7dfd1f9b85f..baf31da6eb5 100644
--- a/pkg/reconciler/pipelinerun/affinity_assistant.go
+++ b/pkg/reconciler/pipelinerun/affinity_assistant.go
@@ -53,12 +53,12 @@ func (c *Reconciler) createAffinityAssistants(ctx context.Context, wb []v1beta1.
 	for _, w := range wb {
 		if w.PersistentVolumeClaim != nil || w.VolumeClaimTemplate != nil {
 			affinityAssistantName := getAffinityAssistantName(w.Name, pr.Name)
-			_, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Get(affinityAssistantName, metav1.GetOptions{})
+			_, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Get(ctx, affinityAssistantName, metav1.GetOptions{})
 			claimName := getClaimName(w, pr.GetOwnerReference())
 			switch {
 			case apierrors.IsNotFound(err):
 				affinityAssistantStatefulSet := affinityAssistantStatefulSet(affinityAssistantName, pr, claimName, c.Images.NopImage)
-				_, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Create(affinityAssistantStatefulSet)
+				_, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Create(ctx, affinityAssistantStatefulSet, metav1.CreateOptions{})
 				if err != nil {
 					errs = append(errs, fmt.Errorf("failed to create StatefulSet %s: %s", affinityAssistantName, err))
 				}
@@ -94,7 +94,7 @@ func (c *Reconciler) cleanupAffinityAssistants(ctx context.Context, pr *v1beta1.
 	for _, w := range pr.Spec.Workspaces {
 		if w.PersistentVolumeClaim != nil || w.VolumeClaimTemplate != nil {
 			affinityAssistantStsName := getAffinityAssistantName(w.Name, pr.Name)
-			if err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Delete(affinityAssistantStsName, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+			if err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Delete(ctx, affinityAssistantStsName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
 				errs = append(errs, fmt.Errorf("failed to delete StatefulSet %s: %s", affinityAssistantStsName, err))
 			}
 		}
diff --git a/pkg/reconciler/pipelinerun/affinity_assistant_test.go b/pkg/reconciler/pipelinerun/affinity_assistant_test.go
index a294cdf334a..2d28012ba54 100644
--- a/pkg/reconciler/pipelinerun/affinity_assistant_test.go
+++ b/pkg/reconciler/pipelinerun/affinity_assistant_test.go
@@ -34,6 +34,10 @@ import (
 // TestCreateAndDeleteOfAffinityAssistant tests to create and delete an Affinity Assistant
 // for a given PipelineRun with a PVC workspace
 func TestCreateAndDeleteOfAffinityAssistant(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
 	c := Reconciler{
 		KubeClientSet: fakek8s.NewSimpleClientset(),
 		Images:        pipeline.Images{},
@@ -56,23 +60,23 @@ func TestCreateAndDeleteOfAffinityAssistant(t *testing.T) {
 		},
 	}
 
-	err := c.createAffinityAssistants(context.Background(), testPipelineRun.Spec.Workspaces, testPipelineRun, testPipelineRun.Namespace)
+	err := c.createAffinityAssistants(ctx, testPipelineRun.Spec.Workspaces, testPipelineRun, testPipelineRun.Namespace)
 	if err != nil {
 		t.Errorf("unexpected error from createAffinityAssistants: %v", err)
 	}
 
 	expectedAffinityAssistantName := getAffinityAssistantName(workspaceName, testPipelineRun.Name)
-	_, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(expectedAffinityAssistantName, metav1.GetOptions{})
+	_, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(ctx, expectedAffinityAssistantName, metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("unexpected error when retrieving StatefulSet: %v", err)
 	}
 
-	err = c.cleanupAffinityAssistants(context.Background(), testPipelineRun)
+	err = c.cleanupAffinityAssistants(ctx, testPipelineRun)
 	if err != nil {
 		t.Errorf("unexpected error from cleanupAffinityAssistants: %v", err)
 	}
 
-	_, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(expectedAffinityAssistantName, metav1.GetOptions{})
+	_, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(ctx, expectedAffinityAssistantName, metav1.GetOptions{})
 	if !apierrors.IsNotFound(err) {
 		t.Errorf("expected a NotFound response, got: %v", err)
 	}
diff --git a/pkg/reconciler/pipelinerun/cancel.go b/pkg/reconciler/pipelinerun/cancel.go
index 81dcea11819..b7d18651974 100644
--- a/pkg/reconciler/pipelinerun/cancel.go
+++ b/pkg/reconciler/pipelinerun/cancel.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pipelinerun
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"log"
@@ -48,7 +49,7 @@ func init() {
 }
 
 // cancelPipelineRun marks the PipelineRun as cancelled and any resolved TaskRun(s) too.
-func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error {
+func cancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error {
 	errs := []string{}
 
 	// Loop over the TaskRuns in the PipelineRun status.
@@ -56,7 +57,7 @@ func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clien
 	for taskRunName := range pr.Status.TaskRuns {
 		logger.Infof("cancelling TaskRun %s", taskRunName)
 
-		if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(taskRunName, types.JSONPatchType, cancelPatchBytes, ""); err != nil {
+		if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, taskRunName, types.JSONPatchType, cancelPatchBytes, metav1.PatchOptions{}, ""); err != nil {
 			errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error())
 			continue
 		}
diff --git a/pkg/reconciler/pipelinerun/cancel_test.go b/pkg/reconciler/pipelinerun/cancel_test.go
index 764adf72285..c5d1d0bf1a1 100644
--- a/pkg/reconciler/pipelinerun/cancel_test.go
+++ b/pkg/reconciler/pipelinerun/cancel_test.go
@@ -87,7 +87,7 @@ func TestCancelPipelineRun(t *testing.T) {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			c, _ := test.SeedTestData(t, ctx, d)
-			if err := cancelPipelineRun(logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline); err != nil {
+			if err := cancelPipelineRun(ctx, logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline); err != nil {
 				t.Fatal(err)
 			}
 			// This PipelineRun should still be complete and false, and the status should reflect that
@@ -95,7 +95,7 @@ func TestCancelPipelineRun(t *testing.T) {
 			if cond.IsTrue() {
 				t.Errorf("Expected PipelineRun status to be complete and false, but was %v", cond)
 			}
-			l, err := c.Pipeline.TektonV1beta1().TaskRuns("").List(metav1.ListOptions{})
+			l, err := c.Pipeline.TektonV1beta1().TaskRuns("").List(ctx, metav1.ListOptions{})
 			if err != nil {
 				t.Fatal(err)
 			}
diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go
index 5879cd7bac2..44d38b684ee 100644
--- a/pkg/reconciler/pipelinerun/pipelinerun.go
+++ b/pkg/reconciler/pipelinerun/pipelinerun.go
@@ -188,7 +188,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun)
 
 	if pr.IsCancelled() {
 		// If the pipelinerun is cancelled, cancel tasks and update status
-		err := cancelPipelineRun(logger, pr, c.PipelineClientSet)
+		err := cancelPipelineRun(ctx, logger, pr, c.PipelineClientSet)
 		return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err)
 	}
 
@@ -219,7 +219,7 @@ func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, pr *v1
 	afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
 	events.Emit(ctx, beforeCondition, afterCondition, pr)
 
-	_, err := c.updateLabelsAndAnnotations(pr)
+	_, err := c.updateLabelsAndAnnotations(ctx, pr)
 	if err != nil {
 		logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(err))
 		events.EmitError(controller.GetEventRecorder(ctx), err, pr)
@@ -394,7 +394,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	// pipelineRunState is instantiated and updated on every reconcile cycle
 	pipelineRunState, err := resources.ResolvePipelineRun(ctx,
 		*pr,
-		func(name string) (v1beta1.TaskInterface, error) {
+		func(ctx context.Context, name string) (v1beta1.TaskInterface, error) {
 			return c.taskLister.Tasks(pr.Namespace).Get(name)
 		},
 		func(name string) (*v1beta1.TaskRun, error) {
@@ -448,7 +448,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	if pipelineRunFacts.State.IsBeforeFirstTaskRun() {
 		if pr.HasVolumeClaimTemplate() {
 			// create workspace PVC from template
-			if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(pr.Spec.Workspaces, pr.GetOwnerReference(), pr.Namespace); err != nil {
+			if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(ctx, pr.Spec.Workspaces, pr.GetOwnerReference(), pr.Namespace); err != nil {
 				logger.Errorf("Failed to create PVC for PipelineRun %s: %v", pr.Name, err)
 				pr.Status.MarkFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC,
 					"Failed to create PVC for PipelineRun %s/%s Workspaces correctly: %s",
@@ -596,7 +596,7 @@ func (c *Reconciler) createTaskRun(ctx context.Context, rprt *resources.Resolved
 			Type:   apis.ConditionSucceeded,
 			Status: corev1.ConditionUnknown,
 		})
-		return c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).UpdateStatus(tr)
+		return c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).UpdateStatus(ctx, tr, metav1.UpdateOptions{})
 	}
 
 	serviceAccountName, podTemplate := pr.GetTaskRunSpecs(rprt.PipelineTask.Name)
@@ -647,7 +647,7 @@ func (c *Reconciler) createTaskRun(ctx context.Context, rprt *resources.Resolved
 
 	resources.WrapSteps(&tr.Spec, rprt.PipelineTask, rprt.ResolvedTaskResources.Inputs, rprt.ResolvedTaskResources.Outputs, storageBasePath)
 	logger.Infof("Creating a new TaskRun object %s", rprt.TaskRunName)
-	return c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).Create(tr)
+	return c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).Create(ctx, tr, metav1.CreateOptions{})
 }
 
 // taskWorkspaceByWorkspaceVolumeSource is returning the WorkspaceBinding with the TaskRun specified name.
@@ -808,7 +808,7 @@ func getTaskRunTimeout(ctx context.Context, pr *v1beta1.PipelineRun, rprt *resou
 	return taskRunTimeout
 }
 
-func (c *Reconciler) updateLabelsAndAnnotations(pr *v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) {
+func (c *Reconciler) updateLabelsAndAnnotations(ctx context.Context, pr *v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) {
 	newPr, err := c.pipelineRunLister.PipelineRuns(pr.Namespace).Get(pr.Name)
 	if err != nil {
 		return nil, fmt.Errorf("error getting PipelineRun %s when updating labels/annotations: %w", pr.Name, err)
@@ -820,7 +820,7 @@ func (c *Reconciler) updateLabelsAndAnnotations(pr *v1beta1.PipelineRun) (*v1bet
 		newPr = newPr.DeepCopy()
 		newPr.Labels = pr.Labels
 		newPr.Annotations = pr.Annotations
-		return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).Update(newPr)
+		return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).Update(ctx, newPr, metav1.UpdateOptions{})
 	}
 	return newPr, nil
 }
@@ -865,7 +865,7 @@ func (c *Reconciler) makeConditionCheckContainer(ctx context.Context, rprt *reso
 		PodTemplate:        podTemplate,
 	}}
 
-	cctr, err := c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).Create(tr)
+	cctr, err := c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).Create(ctx, tr, metav1.CreateOptions{})
 	cc := v1beta1.ConditionCheck(*cctr)
 	return &cc, err
 }
diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go
index 7a883e0f90c..34ee0fcf27a 100644
--- a/pkg/reconciler/pipelinerun/pipelinerun_test.go
+++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go
@@ -407,7 +407,7 @@ func TestReconcile(t *testing.T) {
 	}
 
 	// A PVC should have been created to deal with output -> input linking
-	ensurePVCCreated(t, clients, expectedTaskRun.GetPipelineRunPVCName(), "foo")
+	ensurePVCCreated(prt.TestAssets.Ctx, t, clients, expectedTaskRun.GetPipelineRunPVCName(), "foo")
 }
 
 func TestReconcile_PipelineSpecTaskSpec(t *testing.T) {
@@ -1251,7 +1251,7 @@ func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) {
 	}
 
 	// Check that the PipelineRun is still running with correct error message
-	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-fails-to-cancel", metav1.GetOptions{})
+	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get(testAssets.Ctx, "test-pipeline-fails-to-cancel", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err)
 	}
@@ -1438,7 +1438,7 @@ func TestReconcileWithDifferentServiceAccounts(t *testing.T) {
 	}
 	for i := range ps[0].Spec.Tasks {
 		// Check that the expected TaskRun was created
-		actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(taskRunNames[i], metav1.GetOptions{})
+		actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(prt.TestAssets.Ctx, taskRunNames[i], metav1.GetOptions{})
 		if err != nil {
 			t.Fatalf("Expected a TaskRun to be created, but it wasn't: %s", err)
 		}
@@ -1990,9 +1990,9 @@ func makeExpectedTr(condName, ccName string, labels, annotations map[string]stri
 	)
 }
 
-func ensurePVCCreated(t *testing.T, clients test.Clients, name, namespace string) {
+func ensurePVCCreated(ctx context.Context, t *testing.T, clients test.Clients, name, namespace string) {
 	t.Helper()
-	_, err := clients.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{})
+	_, err := clients.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Expected PVC %s to be created for VolumeResource but did not exist", name)
 	}
@@ -2045,7 +2045,7 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) {
 	pipelineRun, clients := prt.reconcileRun("foo", prName, wantEvents, false)
 
 	// Check that the expected TaskRun was created
-	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{
+	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
 		LabelSelector: "tekton.dev/pipelineTask=hello-world-1,tekton.dev/pipelineRun=test-pipeline-run",
 		Limit:         1,
 	})
@@ -2087,7 +2087,7 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) {
 	skippedTasks := []string{"hello-world-2"}
 	for _, skippedTask := range skippedTasks {
 		labelSelector := fmt.Sprintf("tekton.dev/pipelineTask=%s,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs", skippedTask)
-		actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{
+		actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
 			LabelSelector: labelSelector,
 			Limit:         1,
 		})
@@ -2181,7 +2181,7 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) {
 		),
 	)
 	// Check that the expected TaskRun was created
-	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{
+	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
 		LabelSelector: "tekton.dev/pipelineTask=b-task,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs",
 		Limit:         1,
 	})
@@ -2210,7 +2210,7 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) {
 	skippedTasks := []string{"c-task", "d-task"}
 	for _, skippedTask := range skippedTasks {
 		labelSelector := fmt.Sprintf("tekton.dev/pipelineTask=%s,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs", skippedTask)
-		actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{
+		actualSkippedTask, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
 			LabelSelector: labelSelector,
 			Limit:         1,
 		})
@@ -2284,7 +2284,7 @@ func TestReconcileWithAffinityAssistantStatefulSet(t *testing.T) {
 		}
 	}
 
-	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{})
+	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error when listing TaskRuns: %v", err)
 	}
@@ -2369,7 +2369,7 @@ func TestReconcileWithVolumeClaimTemplateWorkspace(t *testing.T) {
 		t.Errorf("expected the created PVC to be named %s. It was named %s", expectedPVCName, pvcNames[0])
 	}
 
-	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{})
+	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error when listing TaskRuns: %v", err)
 	}
@@ -2425,7 +2425,7 @@ func TestReconcileWithVolumeClaimTemplateWorkspaceUsingSubPaths(t *testing.T) {
 
 	reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run", []string{}, false)
 
-	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{})
+	taskRuns, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error when listing TaskRuns: %v", err)
 	}
@@ -2568,7 +2568,7 @@ func TestReconcileWithTaskResults(t *testing.T) {
 		),
 	)
 	// Check that the expected TaskRun was created
-	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{
+	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{
 		LabelSelector: "tekton.dev/pipelineTask=b-task,tekton.dev/pipelineRun=test-pipeline-run-different-service-accs",
 		Limit:         1,
 	})
@@ -2643,7 +2643,7 @@ func TestReconcileWithTaskResultsEmbeddedNoneStarted(t *testing.T) {
 		),
 	)
 	// Check that the expected TaskRun was created (only)
-	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(metav1.ListOptions{})
+	actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failure to list TaskRun's %s", err)
 	}
@@ -3957,7 +3957,7 @@ func (prt PipelineRunTest) reconcileRun(namespace, pipelineRunName string, wantE
 		prt.Test.Fatalf("Error reconciling: %s", reconcileError)
 	}
 	// Check that the PipelineRun was reconciled correctly
-	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns(namespace).Get(pipelineRunName, metav1.GetOptions{})
+	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns(namespace).Get(prt.TestAssets.Ctx, pipelineRunName, metav1.GetOptions{})
 	if err != nil {
 		prt.Test.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err)
 	}
diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref.go b/pkg/reconciler/pipelinerun/resources/pipelineref.go
index 3ad299eb281..7872de14fb7 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelineref.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelineref.go
@@ -17,6 +17,7 @@ limitations under the License.
 package resources
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
@@ -32,10 +33,10 @@ type LocalPipelineRefResolver struct {
 
 // GetPipeline will resolve a Pipeline from the local cluster using a versioned Tekton client. It will
 // return an error if it can't find an appropriate Pipeline for any reason.
-func (l *LocalPipelineRefResolver) GetPipeline(name string) (v1beta1.PipelineInterface, error) {
+func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) (v1beta1.PipelineInterface, error) {
 	// If we are going to resolve this reference locally, we need a namespace scope.
 	if l.Namespace == "" {
 		return nil, fmt.Errorf("Must specify namespace to resolve reference to pipeline %s", name)
 	}
-	return l.Tektonclient.TektonV1beta1().Pipelines(l.Namespace).Get(name, metav1.GetOptions{})
+	return l.Tektonclient.TektonV1beta1().Pipelines(l.Namespace).Get(ctx, name, metav1.GetOptions{})
 }
diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
index 9e4b0c4e87c..5733a4a6ee3 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
@@ -17,6 +17,7 @@
 package resources_test
 
 import (
+	"context"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -61,6 +62,9 @@ func TestPipelineRef(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
+			ctx := context.Background()
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
 			tektonclient := fake.NewSimpleClientset(tc.pipelines...)
 
 			lc := &resources.LocalPipelineRefResolver{
@@ -68,7 +72,7 @@ func TestPipelineRef(t *testing.T) {
 				Tektonclient: tektonclient,
 			}
 
-			task, err := lc.GetPipeline(tc.ref.Name)
+			task, err := lc.GetPipeline(ctx, tc.ref.Name)
 			if tc.wantErr && err == nil {
 				t.Fatal("Expected error but found nil instead")
 			} else if !tc.wantErr && err != nil {
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
index 7c23ea4e0d7..34953c0a17e 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
@@ -332,7 +332,7 @@ func ResolvePipelineRun(
 		if pt.TaskRef.Kind == v1beta1.ClusterTaskKind {
 			t, err = getClusterTask(pt.TaskRef.Name)
 		} else {
-			t, err = getTask(pt.TaskRef.Name)
+			t, err = getTask(ctx, pt.TaskRef.Name)
 		}
 		if err != nil {
 			return nil, &TaskNotFoundError{
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
index ecfd37cd602..41c7992db6d 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
@@ -977,7 +977,7 @@ func TestResolvePipelineRun(t *testing.T) {
 	}
 	// The Task "task" doesn't actually take any inputs or outputs, but validating
 	// that is not done as part of Run resolution
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return nil, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, nil }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return nil, nil }
@@ -1052,7 +1052,7 @@ func TestResolvePipelineRun_PipelineTaskHasNoResources(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return &trs[0], nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return clustertask, nil }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return nil, nil }
@@ -1090,7 +1090,7 @@ func TestResolvePipelineRun_TaskDoesntExist(t *testing.T) {
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
 	// Return an error when the Task is retrieved, as if it didn't exist
-	getTask := func(name string) (v1beta1.TaskInterface, error) {
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) {
 		return nil, kerrors.NewNotFound(v1beta1.Resource("task"), name)
 	}
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) {
@@ -1140,7 +1140,7 @@ func TestResolvePipelineRun_ResourceBindingsDontExist(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return &trs[0], nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return clustertask, nil }
 	getCondition := func(name string) (*v1alpha1.Condition, error) {
@@ -1200,7 +1200,7 @@ func TestResolvePipelineRun_withExistingTaskRuns(t *testing.T) {
 
 	// The Task "task" doesn't actually take any inputs or outputs, but validating
 	// that is not done as part of Run resolution
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return nil, nil }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return nil, nil }
@@ -1252,7 +1252,7 @@ func TestResolvedPipelineRun_PipelineTaskHasOptionalResources(t *testing.T) {
 		},
 	}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return taskWithOptionalResourcesDeprecated, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return taskWithOptionalResourcesDeprecated, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return nil, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, nil }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return nil, nil }
@@ -1303,7 +1303,7 @@ func TestResolveConditionChecks(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, errors.New("should not get called") }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return &condition, nil }
 	pr := v1beta1.PipelineRun{
@@ -1414,7 +1414,7 @@ func TestResolveConditionChecks_MultipleConditions(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, errors.New("should not get called") }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return &condition, nil }
 	pr := v1beta1.PipelineRun{
@@ -1486,7 +1486,7 @@ func TestResolveConditionChecks_ConditionDoesNotExist(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) {
 		if name == ccName {
 			return nil, fmt.Errorf("should not be called")
@@ -1541,7 +1541,7 @@ func TestResolveConditionCheck_UseExistingConditionCheckName(t *testing.T) {
 	}}
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) {
 		if name == ccName {
 			return cc, nil
@@ -1616,7 +1616,7 @@ func TestResolvedConditionCheck_WithResources(t *testing.T) {
 		Conditions: []v1beta1.PipelineTaskCondition{ptc},
 	}}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getTaskRun := func(name string) (*v1beta1.TaskRun, error) { return nil, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, errors.New("should not get called") }
 
@@ -1823,7 +1823,7 @@ func TestResolvePipeline_WhenExpressions(t *testing.T) {
 
 	providedResources := map[string]*resourcev1alpha1.PipelineResource{}
 
-	getTask := func(name string) (v1beta1.TaskInterface, error) { return task, nil }
+	getTask := func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { return task, nil }
 	getClusterTask := func(name string) (v1beta1.TaskInterface, error) { return nil, errors.New("should not get called") }
 	getCondition := func(name string) (*v1alpha1.Condition, error) { return &condition, nil }
 	pr := v1beta1.PipelineRun{
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinespec.go b/pkg/reconciler/pipelinerun/resources/pipelinespec.go
index b6faa8e407a..0364cfbbdb7 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinespec.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinespec.go
@@ -25,7 +25,7 @@ import (
 )
 
 // GetPipeline is a function used to retrieve Pipelines.
-type GetPipeline func(string) (v1beta1.PipelineInterface, error)
+type GetPipeline func(context.Context, string) (v1beta1.PipelineInterface, error)
 
 // GetPipelineData will retrieve the Pipeline metadata and Spec associated with the
 // provided PipelineRun. This can come from a reference Pipeline or from the PipelineRun's
@@ -36,7 +36,7 @@ func GetPipelineData(ctx context.Context, pipelineRun *v1beta1.PipelineRun, getP
 	switch {
 	case pipelineRun.Spec.PipelineRef != nil && pipelineRun.Spec.PipelineRef.Name != "":
 		// Get related pipeline for pipelinerun
-		t, err := getPipeline(pipelineRun.Spec.PipelineRef.Name)
+		t, err := getPipeline(ctx, pipelineRun.Spec.PipelineRef.Name)
 		if err != nil {
 			return nil, nil, fmt.Errorf("error when listing pipelines for pipelineRun %s: %w", pipelineRun.Name, err)
 		}
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinespec_test.go b/pkg/reconciler/pipelinerun/resources/pipelinespec_test.go
index 10c1e672b65..0aa50ba5c93 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinespec_test.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinespec_test.go
@@ -49,7 +49,7 @@ func TestGetPipelineSpec_Ref(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.PipelineInterface, error) { return pipeline, nil }
+	gt := func(ctx context.Context, n string) (v1beta1.PipelineInterface, error) { return pipeline, nil }
 
 	pipelineMeta, pipelineSpec, err := GetPipelineData(context.Background(), pr, gt)
 	if err != nil {
@@ -81,7 +81,7 @@ func TestGetPipelineSpec_Embedded(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("shouldn't be called") }
+	gt := func(ctx context.Context, n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("shouldn't be called") }
 
 	pipelineMeta, pipelineSpec, err := GetPipelineData(context.Background(), pr, gt)
 	if err != nil {
@@ -103,7 +103,7 @@ func TestGetPipelineSpec_Invalid(t *testing.T) {
 			Name: "mypipelinerun",
 		},
 	}
-	gt := func(n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("shouldn't be called") }
+	gt := func(ctx context.Context, n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("shouldn't be called") }
 	_, _, err := GetPipelineData(context.Background(), tr, gt)
 	if err == nil {
 		t.Fatalf("Expected error resolving spec with no embedded or referenced pipeline spec but didn't get error")
@@ -121,7 +121,7 @@ func TestGetPipelineSpec_Error(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("something went wrong") }
+	gt := func(ctx context.Context, n string) (v1beta1.PipelineInterface, error) { return nil, errors.New("something went wrong") }
 	_, _, err := GetPipelineData(context.Background(), tr, gt)
 	if err == nil {
 		t.Fatalf("Expected error when unable to find referenced Pipeline but got none")
diff --git a/pkg/reconciler/taskrun/resources/taskref.go b/pkg/reconciler/taskrun/resources/taskref.go
index 8d259eae0be..ebc9cbe2f74 100644
--- a/pkg/reconciler/taskrun/resources/taskref.go
+++ b/pkg/reconciler/taskrun/resources/taskref.go
@@ -17,6 +17,7 @@ limitations under the License.
 package resources
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
@@ -33,9 +34,9 @@ type LocalTaskRefResolver struct {
 
 // GetTask will resolve either a Task or ClusterTask from the local cluster using a versioned Tekton client. It will
 // return an error if it can't find an appropriate Task for any reason.
-func (l *LocalTaskRefResolver) GetTask(name string) (v1beta1.TaskInterface, error) {
+func (l *LocalTaskRefResolver) GetTask(ctx context.Context, name string) (v1beta1.TaskInterface, error) {
 	if l.Kind == v1beta1.ClusterTaskKind {
-		task, err := l.Tektonclient.TektonV1beta1().ClusterTasks().Get(name, metav1.GetOptions{})
+		task, err := l.Tektonclient.TektonV1beta1().ClusterTasks().Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -46,5 +47,5 @@ func (l *LocalTaskRefResolver) GetTask(name string) (v1beta1.TaskInterface, erro
 	if l.Namespace == "" {
 		return nil, fmt.Errorf("Must specify namespace to resolve reference to task %s", name)
 	}
-	return l.Tektonclient.TektonV1beta1().Tasks(l.Namespace).Get(name, metav1.GetOptions{})
+	return l.Tektonclient.TektonV1beta1().Tasks(l.Namespace).Get(ctx, name, metav1.GetOptions{})
 }
diff --git a/pkg/reconciler/taskrun/resources/taskref_test.go b/pkg/reconciler/taskrun/resources/taskref_test.go
index d3a5accc44d..0a72749b230 100644
--- a/pkg/reconciler/taskrun/resources/taskref_test.go
+++ b/pkg/reconciler/taskrun/resources/taskref_test.go
@@ -17,6 +17,7 @@
 package resources_test
 
 import (
+	"context"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -75,6 +76,9 @@ func TestTaskRef(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
+			ctx := context.Background()
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
 			tektonclient := fake.NewSimpleClientset(tc.tasks...)
 
 			lc := &resources.LocalTaskRefResolver{
@@ -83,7 +87,7 @@ func TestTaskRef(t *testing.T) {
 				Tektonclient: tektonclient,
 			}
 
-			task, err := lc.GetTask(tc.ref.Name)
+			task, err := lc.GetTask(ctx, tc.ref.Name)
 			if tc.wantErr && err == nil {
 				t.Fatal("Expected error but found nil instead")
 			} else if !tc.wantErr && err != nil {
diff --git a/pkg/reconciler/taskrun/resources/taskspec.go b/pkg/reconciler/taskrun/resources/taskspec.go
index 2f27002d2c4..eeadf9befd6 100644
--- a/pkg/reconciler/taskrun/resources/taskspec.go
+++ b/pkg/reconciler/taskrun/resources/taskspec.go
@@ -26,7 +26,7 @@ import (
 )
 
 // GetTask is a function used to retrieve Tasks.
-type GetTask func(string) (v1beta1.TaskInterface, error)
+type GetTask func(context.Context, string) (v1beta1.TaskInterface, error)
 type GetTaskRun func(string) (*v1beta1.TaskRun, error)
 
 // GetClusterTask is a function that will retrieve the Task from name and namespace.
@@ -41,7 +41,7 @@ func GetTaskData(ctx context.Context, taskRun *v1beta1.TaskRun, getTask GetTask)
 	switch {
 	case taskRun.Spec.TaskRef != nil && taskRun.Spec.TaskRef.Name != "":
 		// Get related task for taskrun
-		t, err := getTask(taskRun.Spec.TaskRef.Name)
+		t, err := getTask(ctx, taskRun.Spec.TaskRef.Name)
 		if err != nil {
 			return nil, nil, fmt.Errorf("error when listing tasks for taskRun %s: %w", taskRun.Name, err)
 		}
diff --git a/pkg/reconciler/taskrun/resources/taskspec_test.go b/pkg/reconciler/taskrun/resources/taskspec_test.go
index 2a1d6ab179b..803513d7b33 100644
--- a/pkg/reconciler/taskrun/resources/taskspec_test.go
+++ b/pkg/reconciler/taskrun/resources/taskspec_test.go
@@ -47,7 +47,7 @@ func TestGetTaskSpec_Ref(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.TaskInterface, error) { return task, nil }
+	gt := func(ctx context.Context, n string) (v1beta1.TaskInterface, error) { return task, nil }
 
 	taskMeta, taskSpec, err := GetTaskData(context.Background(), tr, gt)
 	if err != nil {
@@ -76,7 +76,7 @@ func TestGetTaskSpec_Embedded(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.TaskInterface, error) { return nil, errors.New("shouldn't be called") }
+	gt := func(ctx context.Context, n string) (v1beta1.TaskInterface, error) { return nil, errors.New("shouldn't be called") }
 
 	taskMeta, taskSpec, err := GetTaskData(context.Background(), tr, gt)
 	if err != nil {
@@ -98,7 +98,7 @@ func TestGetTaskSpec_Invalid(t *testing.T) {
 			Name: "mytaskrun",
 		},
 	}
-	gt := func(n string) (v1beta1.TaskInterface, error) { return nil, errors.New("shouldn't be called") }
+	gt := func(ctx context.Context, n string) (v1beta1.TaskInterface, error) { return nil, errors.New("shouldn't be called") }
 	_, _, err := GetTaskData(context.Background(), tr, gt)
 	if err == nil {
 		t.Fatalf("Expected error resolving spec with no embedded or referenced task spec but didn't get error")
@@ -116,7 +116,7 @@ func TestGetTaskSpec_Error(t *testing.T) {
 			},
 		},
 	}
-	gt := func(n string) (v1beta1.TaskInterface, error) { return nil, errors.New("something went wrong") }
+	gt := func(ctx context.Context, n string) (v1beta1.TaskInterface, error) { return nil, errors.New("something went wrong") }
 	_, _, err := GetTaskData(context.Background(), tr, gt)
 	if err == nil {
 		t.Fatalf("Expected error when unable to find referenced Task but got none")
diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go
index 3f7d3193de4..44c4d28d53f 100644
--- a/pkg/reconciler/taskrun/taskrun.go
+++ b/pkg/reconciler/taskrun/taskrun.go
@@ -112,7 +112,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg
 		cloudEventErr := cloudevent.SendCloudEvents(tr, c.cloudEventClient, logger)
 		// Regardless of `err`, we must write back any status update that may have
 		// been generated by `sendCloudEvents`
-		_, updateErr := c.updateLabelsAndAnnotations(tr)
+		_, updateErr := c.updateLabelsAndAnnotations(ctx, tr)
 		merr = multierror.Append(cloudEventErr, updateErr)
 		if cloudEventErr != nil {
 			// Let's keep timeouts and sidecars running as long as we're trying to
@@ -120,10 +120,10 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg
 			return merr.ErrorOrNil()
 		}
 		c.timeoutHandler.Release(tr.GetNamespacedName())
-		pod, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
+		pod, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{})
 		if err == nil {
 			logger.Debugf("Stopping sidecars for TaskRun %q of Pod %q", tr.Name, tr.Status.PodName)
-			err = podconvert.StopSidecars(c.Images.NopImage, c.KubeClientSet, *pod)
+			err = podconvert.StopSidecars(ctx, c.Images.NopImage, c.KubeClientSet, *pod)
 			if err == nil {
 				// Check if any SidecarStatuses are still shown as Running after stopping
 				// Sidecars. If any Running, update SidecarStatuses based on Pod ContainerStatuses.
@@ -202,7 +202,7 @@ func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, tr *v1
 
 	// Send k8s events and cloud events (when configured)
 	events.Emit(ctx, beforeCondition, afterCondition, tr)
-	_, err := c.updateLabelsAndAnnotations(tr)
+	_, err := c.updateLabelsAndAnnotations(ctx, tr)
 	if err != nil {
 		events.EmitError(controller.GetEventRecorder(ctx), err, tr)
 	}
@@ -339,7 +339,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun,
 	var err error
 
 	if tr.Status.PodName != "" {
-		pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
+		pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{})
 		if k8serrors.IsNotFound(err) {
 			// Keep going, this will result in the Pod being created below.
 		} else if err != nil {
@@ -350,7 +350,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun,
 			return err
 		}
 	} else {
-		pos, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).List(metav1.ListOptions{
+		pos, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).List(ctx, metav1.ListOptions{
 			LabelSelector: getLabelSelector(tr),
 		})
 		if err != nil {
@@ -367,7 +367,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun,
 
 	if pod == nil {
 		if tr.HasVolumeClaimTemplate() {
-			if err := c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(tr.Spec.Workspaces, tr.GetOwnerReference(), tr.Namespace); err != nil {
+			if err := c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(ctx, tr.Spec.Workspaces, tr.GetOwnerReference(), tr.Namespace); err != nil {
 				logger.Errorf("Failed to create PVC for TaskRun %s: %v", tr.Name, err)
 				tr.Status.MarkResourceFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC,
 					fmt.Errorf("Failed to create PVC for TaskRun %s workspaces correctly: %s",
@@ -399,7 +399,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun,
 	}
 
 	if podconvert.SidecarsReady(pod.Status) {
-		if err := podconvert.UpdateReady(c.KubeClientSet, *pod); err != nil {
+		if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil {
 			return err
 		}
 	}
@@ -449,7 +449,7 @@ func (c *Reconciler) updateTaskRunWithDefaultWorkspaces(ctx context.Context, tr
 	return nil
 }
 
-func (c *Reconciler) updateLabelsAndAnnotations(tr *v1beta1.TaskRun) (*v1beta1.TaskRun, error) {
+func (c *Reconciler) updateLabelsAndAnnotations(ctx context.Context, tr *v1beta1.TaskRun) (*v1beta1.TaskRun, error) {
 	newTr, err := c.taskRunLister.TaskRuns(tr.Namespace).Get(tr.Name)
 	if err != nil {
 		return nil, fmt.Errorf("error getting TaskRun %s when updating labels/annotations: %w", tr.Name, err)
@@ -461,7 +461,7 @@ func (c *Reconciler) updateLabelsAndAnnotations(tr *v1beta1.TaskRun) (*v1beta1.T
 		newTr = newTr.DeepCopy()
 		newTr.Labels = tr.Labels
 		newTr.Annotations = tr.Annotations
-		return c.PipelineClientSet.TektonV1beta1().TaskRuns(tr.Namespace).Update(newTr)
+		return c.PipelineClientSet.TektonV1beta1().TaskRuns(tr.Namespace).Update(ctx, newTr, metav1.UpdateOptions{})
 	}
 	return newTr, nil
 }
@@ -520,7 +520,7 @@ func (c *Reconciler) failTaskRun(ctx context.Context, tr *v1beta1.TaskRun, reaso
 	// tr.Status.PodName will be empty if the pod was never successfully created. This condition
 	// can be reached, for example, by the pod never being schedulable due to limits imposed by
 	// a namespace's ResourceQuota.
-	err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Delete(tr.Status.PodName, &metav1.DeleteOptions{})
+	err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Delete(ctx, tr.Status.PodName, metav1.DeleteOptions{})
 	if err != nil && !k8serrors.IsNotFound(err) {
 		logger.Infof("Failed to terminate pod: %v", err)
 		return err
@@ -635,7 +635,7 @@ func (c *Reconciler) createPod(ctx context.Context, tr *v1beta1.TaskRun, rtr *re
 		return nil, fmt.Errorf("translating TaskSpec to Pod: %w", err)
 	}
 
-	pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(pod)
+	pod, err = c.KubeClientSet.CoreV1().Pods(tr.Namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err == nil && willOverwritePodSetAffinity(tr) {
 		if recorder := controller.GetEventRecorder(ctx); recorder != nil {
 			recorder.Eventf(tr, corev1.EventTypeWarning, "PodAffinityOverwrite", "Pod template affinity is overwritten by affinity assistant for pod %q", pod.Name)
diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go
index 91b85d2207b..ca21e5c0f53 100644
--- a/pkg/reconciler/taskrun/taskrun_test.go
+++ b/pkg/reconciler/taskrun/taskrun_test.go
@@ -535,7 +535,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) {
 				t.Errorf("Expected actions to be logged in the kubeclient, got none")
 			}
 
-			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{})
+			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("getting updated taskrun: %v", err)
 			}
@@ -551,7 +551,7 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) {
 				t.Fatalf("Reconcile didn't set pod name")
 			}
 
-			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
+			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(testAssets.Ctx, tr.Status.PodName, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("Failed to fetch build pod: %v", err)
 			}
@@ -705,12 +705,12 @@ func TestReconcile_FeatureFlags(t *testing.T) {
 			if saName == "" {
 				saName = "default"
 			}
-			if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{
+			if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      saName,
 					Namespace: tc.taskRun.Namespace,
 				},
-			}); err != nil {
+			}, metav1.CreateOptions{}); err != nil {
 				t.Fatal(err)
 			}
 			if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil {
@@ -720,7 +720,7 @@ func TestReconcile_FeatureFlags(t *testing.T) {
 				t.Errorf("Expected actions to be logged in the kubeclient, got none")
 			}
 
-			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{})
+			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("getting updated taskrun: %v", err)
 			}
@@ -736,7 +736,7 @@ func TestReconcile_FeatureFlags(t *testing.T) {
 				t.Fatalf("Reconcile didn't set pod name")
 			}
 
-			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
+			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(testAssets.Ctx, tr.Status.PodName, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("Failed to fetch build pod: %v", err)
 			}
@@ -789,12 +789,12 @@ func TestReconcile_CloudEvents(t *testing.T) {
 	c := testAssets.Controller
 	clients := testAssets.Clients
 	saName := "default"
-	if _, err := clients.Kube.CoreV1().ServiceAccounts(taskRun.Namespace).Create(&corev1.ServiceAccount{
+	if _, err := clients.Kube.CoreV1().ServiceAccounts(taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      saName,
 			Namespace: taskRun.Namespace,
 		},
-	}); err != nil {
+	}, metav1.CreateOptions{}); err != nil {
 		t.Fatal(err)
 	}
 
@@ -805,7 +805,7 @@ func TestReconcile_CloudEvents(t *testing.T) {
 		t.Errorf("Expected actions to be logged in the kubeclient, got none")
 	}
 
-	tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
+	tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{})
	if err != nil {
 		t.Fatalf("getting updated taskrun: %v", err)
 	}
@@ -1494,12 +1494,12 @@ func TestReconcile(t *testing.T) {
 			if saName == "" {
 				saName = "default"
 			}
-			if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{
+			if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      saName,
 					Namespace: tc.taskRun.Namespace,
 				},
-			}); err != nil {
+			}, metav1.CreateOptions{}); err != nil {
 				t.Fatal(err)
 			}
 
@@ -1510,7 +1510,7 @@ func TestReconcile(t *testing.T) {
 				t.Errorf("Expected actions to be logged in the kubeclient, got none")
 			}
 
-			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{})
+			tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("getting updated taskrun: %v", err)
 			}
@@ -1526,7 +1526,7 @@ func TestReconcile(t *testing.T) {
 				t.Fatalf("Reconcile didn't set pod name")
 			}
 
-			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
+			pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(testAssets.Ctx, tr.Status.PodName, metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("Failed to fetch build pod: %v", err)
 			}
@@ -1564,12 +1564,12 @@ func TestReconcile_SetsStartTime(t *testing.T) {
 	clients := testAssets.Clients

 	t.Logf("Creating SA %s in %s", "default", "foo")
-	if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(&corev1.ServiceAccount{
+	if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(testAssets.Ctx, &corev1.ServiceAccount{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "default",
 			Namespace: "foo",
 		},
-	}); err != nil {
+	}, metav1.CreateOptions{}); err != nil {
 		t.Fatal(err)
 	}
 
@@ -1577,7 +1577,7 @@ func TestReconcile_SetsStartTime(t *testing.T) {
 		t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
 	}
 
-	newTr, err := testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
+	newTr, err := testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
 	}
@@ -1684,7 +1684,7 @@ func TestReconcileInvalidTaskRuns(t *testing.T) {
 				t.Errorf(err.Error())
 			}
 
-			newTr, err :=
testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) + newTr, err := testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tc.taskRun.Name, err) } @@ -1782,7 +1782,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when Reconcile() : %v", err) } - newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -1807,7 +1807,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { pod.Status = corev1.PodStatus{ Phase: corev1.PodSucceeded, } - if _, err := clients.Kube.CoreV1().Pods(taskRun.Namespace).UpdateStatus(pod); err != nil { + if _, err := clients.Kube.CoreV1().Pods(taskRun.Namespace).UpdateStatus(testAssets.Ctx, pod, metav1.UpdateOptions{}); err != nil { t.Errorf("Unexpected error while updating build: %v", err) } @@ -1819,7 +1819,7 @@ func TestReconcilePodUpdateStatus(t *testing.T) { t.Fatalf("Unexpected error when Reconcile(): %v", err) } - newTr, err = clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + newTr, err = clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error fetching taskrun: %v", err) } @@ -1868,7 +1868,7 @@ func TestReconcileOnCompletedTaskRun(t *testing.T) { if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } - newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -1900,7 +1900,7 @@ func TestReconcileOnCancelledTaskRun(t *testing.T) { if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } - newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2015,7 +2015,7 @@ func TestReconcileTimeouts(t *testing.T) { if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.taskRun)); err != nil { t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) } - newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{}) 
if err != nil { t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", tc.taskRun.Name, err) } @@ -2266,12 +2266,12 @@ func TestReconcileCloudEvents(t *testing.T) { if saName == "" { saName = "default" } - if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: saName, Namespace: tc.taskRun.Namespace, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -2279,7 +2279,7 @@ func TestReconcileCloudEvents(t *testing.T) { t.Errorf("expected no error. Got error %v", err) } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("getting updated taskrun: %v", err) } @@ -2321,7 +2321,7 @@ func TestReconcile_Single_SidecarState(t *testing.T) { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } - getTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + getTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2378,7 +2378,7 @@ func TestReconcile_Multiple_SidecarStates(t *testing.T) { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } - getTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + getTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2438,7 +2438,7 @@ func TestReconcileWorkspaceMissing(t *testing.T) { t.Fatalf("Expected to see a permanent error when reconciling invalid TaskRun, got %s instead", err) } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2484,12 +2484,12 @@ func TestReconcileValidDefaultWorkspace(t *testing.T) { clients := testAssets.Clients t.Logf("Creating SA %s in %s", "default", "foo") - if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "foo", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -2497,7 +2497,7 @@ func TestReconcileValidDefaultWorkspace(t *testing.T) { t.Errorf("Expected no error reconciling valid TaskRun but got %v", err) } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, 
taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2540,12 +2540,12 @@ func TestReconcileInvalidDefaultWorkspace(t *testing.T) { clients := testAssets.Clients t.Logf("Creating SA %s in %s", "default", "foo") - if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "foo", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -2607,12 +2607,12 @@ func TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace(t *testing.T) { clients := testAssets.Clients t.Logf("Creating SA %s in %s", "default", "foo") - if _, err := clients.Kube.CoreV1().ServiceAccounts("default").Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts("default").Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "default", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -2620,12 +2620,12 @@ func TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace(t *testing.T) { t.Errorf("Unexpected reconcile error for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRunOmittingWorkspace.Namespace).Get(taskRunOmittingWorkspace.Name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRunOmittingWorkspace.Namespace).Get(testAssets.Ctx, taskRunOmittingWorkspace.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) } - pod, err := clients.Kube.CoreV1().Pods(taskRunOmittingWorkspace.Namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + pod, err := clients.Kube.CoreV1().Pods(taskRunOmittingWorkspace.Namespace).Get(testAssets.Ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting Pod for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) } @@ -2719,7 +2719,7 @@ func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { t.Fatalf("Expected to see a permanent error when reconciling invalid TaskRun, got %s instead", reconcileErr) } - tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tt.d.TaskRuns[0].Namespace).Get(tt.d.TaskRuns[0].Name, metav1.GetOptions{}) + tr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tt.d.TaskRuns[0].Namespace).Get(testAssets.Ctx, tt.d.TaskRuns[0].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tt.d.TaskRuns[0].Name, err) } @@ -2773,23 +2773,23 @@ func TestReconcileWithWorkspacesIncompatibleWithAffinityAssistant(t *testing.T) clients := testAssets.Clients t.Logf("Creating SA %s in %s", "default", "foo") - if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "foo", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } _ = testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)) - _, err := clients.Pipeline.TektonV1beta1().Tasks(taskRun.Namespace).Get(taskWithTwoWorkspaces.Name, metav1.GetOptions{}) + _, err := 
clients.Pipeline.TektonV1beta1().Tasks(taskRun.Namespace).Get(testAssets.Ctx, taskWithTwoWorkspaces.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("krux: %v", err) } - ttt, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + ttt, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2836,12 +2836,12 @@ func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) { clients := testAssets.Clients t.Logf("Creating SA %s in %s", "default", "foo") - if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(&corev1.ServiceAccount{ + if _, err := clients.Kube.CoreV1().ServiceAccounts("foo").Create(testAssets.Ctx, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: "foo", }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatal(err) } @@ -2849,7 +2849,7 @@ func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } - ttt, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{}) + ttt, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) } @@ -2861,7 +2861,7 @@ func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) { } expectedPVCName := fmt.Sprintf("%s-%s", claimName, "a521418087") - _, err = clients.Kube.CoreV1().PersistentVolumeClaims(taskRun.Namespace).Get(expectedPVCName, metav1.GetOptions{}) + _, err = clients.Kube.CoreV1().PersistentVolumeClaims(taskRun.Namespace).Get(testAssets.Ctx, expectedPVCName, metav1.GetOptions{}) if err != nil { t.Fatalf("expected PVC %s to exist but instead got error when getting it: %v", expectedPVCName, err) } diff --git a/pkg/reconciler/volumeclaim/pvchandler.go b/pkg/reconciler/volumeclaim/pvchandler.go index 7b2ceda88c6..1713a85bcdc 100644 --- a/pkg/reconciler/volumeclaim/pvchandler.go +++ b/pkg/reconciler/volumeclaim/pvchandler.go @@ -17,6 +17,7 @@ limitations under the License. package volumeclaim import ( + "context" "crypto/sha256" "fmt" @@ -36,7 +37,7 @@ const ( ) type PvcHandler interface { - CreatePersistentVolumeClaimsForWorkspaces(wb []v1beta1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error + CreatePersistentVolumeClaimsForWorkspaces(ctx context.Context, wb []v1beta1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error } type defaultPVCHandler struct { @@ -52,13 +53,13 @@ func NewPVCHandler(clientset clientset.Interface, logger *zap.SugaredLogger) Pvc // where claim-name is provided by the user in the volumeClaimTemplate, and owner-name is the name of the // resource with the volumeClaimTemplate declared, a PipelineRun or TaskRun. If the PVC did not exist, a new PVC // with that name is created with the provided OwnerReference. 
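// The PvcHandler interface change above and the method change below are two halves of
// the same move: callers now hand their reconcile context through, so the
// PersistentVolumeClaims Get/Create calls issued on their behalf are cancellable.
// A minimal sketch of the calling side, assuming the usual imports (context, the
// v1beta1 API types, pkg/reconciler/volumeclaim); the helper name
// createWorkspacePVCs and its parameters are illustrative, not part of this change.
func createWorkspacePVCs(ctx context.Context, h volumeclaim.PvcHandler, tr *v1beta1.TaskRun) error {
	if !tr.HasVolumeClaimTemplate() {
		return nil
	}
	// ctx is forwarded all the way down to the client-go PVC calls.
	return h.CreatePersistentVolumeClaimsForWorkspaces(ctx, tr.Spec.Workspaces, tr.GetOwnerReference(), tr.Namespace)
}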
-func (c *defaultPVCHandler) CreatePersistentVolumeClaimsForWorkspaces(wb []v1beta1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error { +func (c *defaultPVCHandler) CreatePersistentVolumeClaimsForWorkspaces(ctx context.Context, wb []v1beta1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error { var errs []error for _, claim := range getPersistentVolumeClaims(wb, ownerReference, namespace) { - _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): - _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{}) if err != nil { errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err)) } diff --git a/pkg/reconciler/volumeclaim/pvchandler_test.go b/pkg/reconciler/volumeclaim/pvchandler_test.go index 8efebb5d3e1..f0f74cbbd59 100644 --- a/pkg/reconciler/volumeclaim/pvchandler_test.go +++ b/pkg/reconciler/volumeclaim/pvchandler_test.go @@ -17,6 +17,7 @@ limitations under the License. package volumeclaim import ( + "context" "fmt" "testing" @@ -64,6 +65,9 @@ func TestCreatePersistentVolumeClaimsForWorkspaces(t *testing.T) { Spec: corev1.PersistentVolumeClaimSpec{}, }, }} + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() ownerRef := metav1.OwnerReference{Name: ownerName} namespace := "ns" @@ -72,13 +76,13 @@ func TestCreatePersistentVolumeClaimsForWorkspaces(t *testing.T) { // when - err := pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(workspaces, ownerRef, namespace) + err := pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(ctx, workspaces, ownerRef, namespace) if err != nil { t.Fatalf("unexpexted error: %v", err) } expectedPVCName := claimName1 + "-ad02547921" - pvc, err := fakekubeclient.CoreV1().PersistentVolumeClaims(namespace).Get(expectedPVCName, metav1.GetOptions{}) + pvc, err := fakekubeclient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, expectedPVCName, metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -126,6 +130,9 @@ func TestCreatePersistentVolumeClaimsForWorkspacesWithoutMetadata(t *testing.T) Spec: corev1.PersistentVolumeClaimSpec{}, }, }} + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() ownerRef := metav1.OwnerReference{Name: ownerName} namespace := "ns" @@ -134,13 +141,13 @@ func TestCreatePersistentVolumeClaimsForWorkspacesWithoutMetadata(t *testing.T) // when - err := pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(workspaces, ownerRef, namespace) + err := pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(ctx, workspaces, ownerRef, namespace) if err != nil { t.Fatalf("unexpexted error: %v", err) } expectedPVCName := fmt.Sprintf("%s-%s", "pvc", "3fc56c2bb2") - pvc, err := fakekubeclient.CoreV1().PersistentVolumeClaims(namespace).Get(expectedPVCName, metav1.GetOptions{}) + pvc, err := fakekubeclient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, expectedPVCName, metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/timeout/handler.go b/pkg/timeout/handler.go index 6c8c0179d0c..540f912a599 100644 --- a/pkg/timeout/handler.go +++ b/pkg/timeout/handler.go @@ -170,7 +170,7 @@ func 
backoffDuration(count uint, jf jitterFunc) time.Duration { // checkPipelineRunTimeouts function creates goroutines to wait for pipelinerun to // finish/timeout in a given namespace func (t *Handler) checkPipelineRunTimeouts(ctx context.Context, namespace string, pipelineclientset clientset.Interface) { - pipelineRuns, err := pipelineclientset.TektonV1beta1().PipelineRuns(namespace).List(metav1.ListOptions{}) + pipelineRuns, err := pipelineclientset.TektonV1beta1().PipelineRuns(namespace).List(ctx, metav1.ListOptions{}) if err != nil { t.logger.Errorf("Can't get pipelinerun list in namespace %s: %s", namespace, err) return @@ -194,7 +194,7 @@ func (t *Handler) CheckTimeouts(ctx context.Context, namespace string, kubeclien namespaceNames := []string{namespace} // all namespaces if namespace == "" { - namespaces, err := kubeclientset.CoreV1().Namespaces().List(metav1.ListOptions{}) + namespaces, err := kubeclientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { t.logger.Errorf("Can't get namespaces list: %s", err) return @@ -214,7 +214,7 @@ func (t *Handler) CheckTimeouts(ctx context.Context, namespace string, kubeclien // checkTaskRunTimeouts function creates goroutines to wait for pipelinerun to // finish/timeout in a given namespace func (t *Handler) checkTaskRunTimeouts(ctx context.Context, namespace string, pipelineclientset clientset.Interface) { - taskruns, err := pipelineclientset.TektonV1beta1().TaskRuns(namespace).List(metav1.ListOptions{}) + taskruns, err := pipelineclientset.TektonV1beta1().TaskRuns(namespace).List(ctx, metav1.ListOptions{}) if err != nil { t.logger.Errorf("Can't get taskrun list in namespace %s: %s", namespace, err) return diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go index e9b271cc02d..e7af76d07bc 100644 --- a/test/artifact_bucket_test.go +++ b/test/artifact_bucket_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "io/ioutil" "os" @@ -49,23 +50,27 @@ const ( // TestStorageBucketPipelineRun is an integration test that will verify a pipeline // can use a bucket for temporary storage of artifacts shared between tasks func TestStorageBucketPipelineRun(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + configFilePath := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") if configFilePath == "" { t.Skip("GCP_SERVICE_ACCOUNT_KEY_PATH variable is not set.") } - c, namespace := setup(t) + c, namespace := setup(ctx, t) // Bucket tests can't run in parallel without causing issues with other tests. 
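// Every e2e test in this change adopts the same preamble: build a cancellable context
// up front, pass it to setup, and let both the interrupt handler and the deferred
// teardown close over that same context. A condensed sketch of that shape;
// TestSomething is an illustrative name, while setup, tearDown and knativetest are
// the helpers these tests already use.
func TestSomething(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	c, namespace := setup(ctx, t)
	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
	defer tearDown(ctx, t, c, namespace)

	// All client calls in the test body then take ctx, e.g.:
	//   c.TaskRunClient.Create(ctx, taskrun, metav1.CreateOptions{})
}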
- knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) bucketName := fmt.Sprintf("build-pipeline-test-%s-%d", namespace, time.Now().Unix()) t.Logf("Creating Secret %s", bucketSecretName) - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, configFilePath, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getBucketSecret(t, configFilePath, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Secret %q: %v", bucketSecretName, err) } - defer deleteBucketSecret(c, t, namespace) + defer deleteBucketSecret(ctx, c, t, namespace) t.Logf("Creating GCS bucket %s", bucketName) createbuckettask := &v1beta1.Task{ @@ -96,7 +101,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { } t.Logf("Creating Task %s", "createbuckettask") - if _, err := c.TaskClient.Create(createbuckettask); err != nil { + if _, err := c.TaskClient.Create(ctx, createbuckettask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) } @@ -108,17 +113,17 @@ func TestStorageBucketPipelineRun(t *testing.T) { } t.Logf("Creating TaskRun %s", "createbuckettaskrun") - if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, createbuckettaskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "createbuckettaskrun", err) } - if err := WaitForTaskRunState(c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "createbuckettaskrun", err) } - defer runTaskToDeleteBucket(c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) + defer runTaskToDeleteBucket(ctx, c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) - originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(config.GetArtifactBucketConfigName(), metav1.GetOptions{}) + originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(ctx, config.GetArtifactBucketConfigName(), metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetArtifactBucketConfigName(), err) } @@ -130,10 +135,10 @@ func TestStorageBucketPipelineRun(t *testing.T) { config.BucketServiceAccountSecretNameKey: bucketSecretName, config.BucketServiceAccountSecretKeyKey: bucketSecretKey, } - if err := updateConfigMap(c.KubeClient, systemNamespace, config.GetArtifactBucketConfigName(), configMapData); err != nil { + if err := updateConfigMap(ctx, c.KubeClient, systemNamespace, config.GetArtifactBucketConfigName(), configMapData); err != nil { t.Fatal(err) } - defer resetConfigMap(t, c, systemNamespace, config.GetArtifactBucketConfigName(), originalConfigMapData) + defer resetConfigMap(ctx, t, c, systemNamespace, config.GetArtifactBucketConfigName(), originalConfigMapData) t.Logf("Creating Git PipelineResource %s", helloworldResourceName) helloworldResource := tb.PipelineResource(helloworldResourceName, tb.PipelineResourceSpec( @@ -142,7 +147,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { tb.PipelineResourceSpecParam("Revision", "master"), ), ) - if _, err := 
c.PipelineResourceClient.Create(helloworldResource); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, helloworldResource, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", helloworldResourceName, err) } @@ -171,7 +176,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { }, }, } - if _, err := c.TaskClient.Create(addFileTask); err != nil { + if _, err := c.TaskClient.Create(ctx, addFileTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) } @@ -190,7 +195,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { }, }, } - if _, err := c.TaskClient.Create(readFileTask); err != nil { + if _, err := c.TaskClient.Create(ctx, readFileTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) } @@ -223,7 +228,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, bucketTestPipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) } @@ -238,12 +243,12 @@ func TestStorageBucketPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, bucketTestPipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) } // Verify status of PipelineRun (wait for it) - if err := WaitForPipelineRunState(c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { + if err := WaitForPipelineRunState(ctx, c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err) t.Fatalf("PipelineRun execution failed") } @@ -251,8 +256,8 @@ func TestStorageBucketPipelineRun(t *testing.T) { // updateConfigMap updates the config map for specified @name with values. We can't use the one from knativetest because // it assumes that Data is already a non-nil map, and by default, it isn't! 
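// Beyond the added ctx parameter, the option structs also move from pointers to
// values in the context-aware clientsets this change updates to (introduced around
// client-go v0.18): DeleteOptions, CreateOptions and UpdateOptions are passed by
// value after the leading context. A generic before/after sketch, with ns, name and
// cm as illustrative placeholders rather than lines from this change:
//
//	old: client.CoreV1().ConfigMaps(ns).Update(cm)
//	new: client.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
//	old: client.CoreV1().Secrets(ns).Delete(name, &metav1.DeleteOptions{})
//	new: client.CoreV1().Secrets(ns).Delete(ctx, name, metav1.DeleteOptions{})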
-func updateConfigMap(client *knativetest.KubeClient, name string, configName string, values map[string]string) error { - configMap, err := client.GetConfigMap(name).Get(configName, metav1.GetOptions{}) +func updateConfigMap(ctx context.Context, client *knativetest.KubeClient, name string, configName string, values map[string]string) error { + configMap, err := client.GetConfigMap(name).Get(ctx, configName, metav1.GetOptions{}) if err != nil { return err } @@ -265,7 +270,7 @@ func updateConfigMap(client *knativetest.KubeClient, name string, configName str configMap.Data[key] = value } - _, err = client.GetConfigMap(name).Update(configMap) + _, err = client.GetConfigMap(name).Update(ctx, configMap, metav1.UpdateOptions{}) return err } @@ -286,19 +291,19 @@ func getBucketSecret(t *testing.T, configFilePath, namespace string) *corev1.Sec } } -func deleteBucketSecret(c *clients, t *testing.T, namespace string) { - if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(bucketSecretName, &metav1.DeleteOptions{}); err != nil { +func deleteBucketSecret(ctx context.Context, c *clients, t *testing.T, namespace string) { + if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(ctx, bucketSecretName, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete Secret `%s`: %s", bucketSecretName, err) } } -func resetConfigMap(t *testing.T, c *clients, namespace, configName string, values map[string]string) { - if err := updateConfigMap(c.KubeClient, namespace, configName, values); err != nil { +func resetConfigMap(ctx context.Context, t *testing.T, c *clients, namespace, configName string, values map[string]string) { + if err := updateConfigMap(ctx, c.KubeClient, namespace, configName, values); err != nil { t.Log(err) } } -func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { +func runTaskToDeleteBucket(ctx context.Context, c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { deletelbuckettask := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: "deletelbuckettask", Namespace: namespace}, Spec: v1beta1.TaskSpec{ @@ -327,7 +332,7 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck } t.Logf("Creating Task %s", "deletelbuckettask") - if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { + if _, err := c.TaskClient.Create(ctx, deletelbuckettask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) } @@ -339,11 +344,11 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck } t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") - if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, deletelbuckettaskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "deletelbuckettaskrun", err) } - if err := WaitForTaskRunState(c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "deletelbuckettaskrun", err) } } diff --git a/test/build_logs.go b/test/build_logs.go index 9b7eac9b0b0..02600af1c24 100644 --- a/test/build_logs.go +++ b/test/build_logs.go @@ -17,6 +17,7 @@ limitations under the License. 
package test import ( + "context" "fmt" "io/ioutil" "strings" @@ -28,16 +29,16 @@ import ( ) // CollectPodLogs will get the logs for all containers in a Pod -func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) { - logs, err := getContainersLogsFromPod(c.KubeClient.Kube, podName, namespace) +func CollectPodLogs(ctx context.Context, c *clients, podName, namespace string, logf logging.FormatLogger) { + logs, err := getContainersLogsFromPod(ctx, c.KubeClient.Kube, podName, namespace) if err != nil { logf("Could not get logs for pod %s: %s", podName, err) } logf("build logs %s", logs) } -func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { - p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) +func getContainersLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{}) if err != nil { return "", err } @@ -45,7 +46,7 @@ func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (st sb := strings.Builder{} for _, container := range p.Spec.Containers { sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name)) - logs, err := getContainerLogsFromPod(c, pod, container.Name, namespace) + logs, err := getContainerLogsFromPod(ctx, c, pod, container.Name, namespace) if err != nil { return "", err } @@ -54,10 +55,10 @@ func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (st return sb.String(), nil } -func getContainerLogsFromPod(c kubernetes.Interface, pod, container, namespace string) (string, error) { +func getContainerLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, container, namespace string) (string, error) { sb := strings.Builder{} req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container}) - rc, err := req.Stream() + rc, err := req.Stream(ctx) if err != nil { return "", err } diff --git a/test/cancel_test.go b/test/cancel_test.go index 9fb7e762caf..ad0875e75a7 100644 --- a/test/cancel_test.go +++ b/test/cancel_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "encoding/json" "fmt" "sync" @@ -39,11 +40,14 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { // the retrying TaskRun to retry. 
for _, numRetries := range []int{0, 1} { t.Run(fmt.Sprintf("retries=%d", numRetries), func(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) pipelineRunName := "cancel-me" pipelineRun := &v1beta1.PipelineRun{ @@ -67,16 +71,16 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } t.Logf("Creating PipelineRun in namespace %s", namespace) - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRunName, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRunName, namespace) - if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Running(pipelineRunName), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, pipelineRunTimeout, Running(pipelineRunName), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRunName, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) } @@ -87,7 +91,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning") if err != nil { t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err) } @@ -95,7 +99,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } wg.Wait() - pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRunName, err) } @@ -109,12 +113,12 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { if err != nil { t.Fatalf("failed to marshal patch bytes in order to cancel") } - if _, err := c.PipelineRunClient.Patch(pr.Name, types.JSONPatchType, patchBytes, ""); err != nil { + if _, err := c.PipelineRunClient.Patch(ctx, pr.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, ""); err != nil { t.Fatalf("Failed to patch PipelineRun `%s` with cancellation: %s", pipelineRunName, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", pipelineRunName, namespace) - if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, FailedWithReason("PipelineRunCancelled", pipelineRunName), "PipelineRunCancelled"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, pipelineRunTimeout, FailedWithReason("PipelineRunCancelled", pipelineRunName), "PipelineRunCancelled"); err != nil { t.Errorf("Error waiting for PipelineRun %q to finished: %s", pipelineRunName, err) } @@ -123,7 +127,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() - 
err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled") + err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled") if err != nil { t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err) } @@ -132,7 +136,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Wait() var trName []string - taskrunList, err = c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + taskrunList, err = c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) } @@ -144,7 +148,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { // Expected failure events: 1 for the pipelinerun cancel, 1 for each TaskRun expectedNumberOfEvents := 1 + len(trName) t.Logf("Making sure %d events were created from pipelinerun with kinds %v", expectedNumberOfEvents, matchKinds) - events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Failed") + events, err := collectMatchingEvents(ctx, c.KubeClient, namespace, matchKinds, "Failed") if err != nil { t.Fatalf("Failed to collect matching events: %q", err) } diff --git a/test/cluster_resource_test.go b/test/cluster_resource_test.go index da8f4901c95..366840fd3a7 100644 --- a/test/cluster_resource_test.go +++ b/test/cluster_resource_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" tb "github.com/tektoncd/pipeline/internal/builder/v1beta1" @@ -31,45 +32,49 @@ import ( ) func TestClusterResource(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + secretName := "hw-secret" configName := "hw-config" resourceName := "helloworld-cluster" taskName := "helloworld-cluster-task" taskRunName := "helloworld-cluster-taskrun" - c, namespace := setup(t) + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating secret %s", secretName) - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getClusterResourceTaskSecret(namespace, secretName)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getClusterResourceTaskSecret(namespace, secretName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Secret `%s`: %s", secretName, err) } t.Logf("Creating configMap %s", configName) - if _, err := c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(getClusterConfigMap(namespace, configName)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(ctx, getClusterConfigMap(namespace, configName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create configMap `%s`: %s", configName, err) } t.Logf("Creating cluster PipelineResource %s", resourceName) - if _, err := c.PipelineResourceClient.Create(getClusterResource(resourceName, secretName)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getClusterResource(resourceName, secretName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create cluster Pipeline Resource `%s`: %s", resourceName, err) } t.Logf("Creating Task %s", taskName) - if _, err := 
c.TaskClient.Create(getClusterResourceTask(namespace, taskName, configName)); err != nil { + if _, err := c.TaskClient.Create(ctx, getClusterResourceTask(namespace, taskName, configName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", taskName, err) } t.Logf("Creating TaskRun %s", taskRunName) - if _, err := c.TaskRunClient.Create(getClusterResourceTaskRun(namespace, taskRunName, taskName, resourceName)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getClusterResourceTaskRun(namespace, taskRunName, taskName, resourceName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Taskrun `%s`: %s", taskRunName, err) } // Verify status of TaskRun (wait for it) - if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunCompleted"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunCompleted"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", taskRunName, err) } } diff --git a/test/controller.go b/test/controller.go index 2c4fdf12191..a3a5c030e28 100644 --- a/test/controller.go +++ b/test/controller.go @@ -44,6 +44,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" coreinformers "k8s.io/client-go/informers/core/v1" fakekubeclientset "k8s.io/client-go/kubernetes/fake" @@ -180,76 +181,76 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers c.Pipeline.PrependReactor("*", "pipelineruns", AddToInformer(t, i.PipelineRun.Informer().GetIndexer())) for _, pr := range d.PipelineRuns { pr := pr.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Pipeline.TektonV1beta1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().PipelineRuns(pr.Namespace).Create(ctx, pr, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Pipeline.PrependReactor("*", "pipelines", AddToInformer(t, i.Pipeline.Informer().GetIndexer())) for _, p := range d.Pipelines { p := p.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Pipeline.TektonV1beta1().Pipelines(p.Namespace).Create(p); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Pipelines(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Pipeline.PrependReactor("*", "taskruns", AddToInformer(t, i.TaskRun.Informer().GetIndexer())) for _, tr := range d.TaskRuns { tr := tr.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Create(tr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Create(ctx, tr, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Pipeline.PrependReactor("*", "tasks", AddToInformer(t, i.Task.Informer().GetIndexer())) for _, ta := range d.Tasks { ta := ta.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Pipeline.TektonV1beta1().Tasks(ta.Namespace).Create(ta); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Tasks(ta.Namespace).Create(ctx, ta, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Pipeline.PrependReactor("*", "clustertasks", AddToInformer(t, i.ClusterTask.Informer().GetIndexer())) for _, ct := range d.ClusterTasks { ct := ct.DeepCopy() // Avoid assumptions that the informer's copy is modified. 
- if _, err := c.Pipeline.TektonV1beta1().ClusterTasks().Create(ct); err != nil { + if _, err := c.Pipeline.TektonV1beta1().ClusterTasks().Create(ctx, ct, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Resource.PrependReactor("*", "pipelineresources", AddToInformer(t, i.PipelineResource.Informer().GetIndexer())) for _, r := range d.PipelineResources { r := r.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(r); err != nil { + if _, err := c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(ctx, r, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Pipeline.PrependReactor("*", "conditions", AddToInformer(t, i.Condition.Informer().GetIndexer())) for _, cond := range d.Conditions { cond := cond.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(cond); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(ctx, cond, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Kube.PrependReactor("*", "pods", AddToInformer(t, i.Pod.Informer().GetIndexer())) for _, p := range d.Pods { p := p.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(p); err != nil { + if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } for _, n := range d.Namespaces { n := n.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Kube.CoreV1().Namespaces().Create(n); err != nil { + if _, err := c.Kube.CoreV1().Namespaces().Create(ctx, n, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Kube.PrependReactor("*", "configmaps", AddToInformer(t, i.ConfigMap.Informer().GetIndexer())) for _, cm := range d.ConfigMaps { cm := cm.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Kube.CoreV1().ConfigMaps(cm.Namespace).Create(cm); err != nil { + if _, err := c.Kube.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } c.Kube.PrependReactor("*", "serviceaccounts", AddToInformer(t, i.ServiceAccount.Informer().GetIndexer())) for _, sa := range d.ServiceAccounts { sa := sa.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.Kube.CoreV1().ServiceAccounts(sa.Namespace).Create(sa); err != nil { + if _, err := c.Kube.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } diff --git a/test/dag_test.go b/test/dag_test.go index e1f45032fd6..35ccf2a562f 100644 --- a/test/dag_test.go +++ b/test/dag_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "math" "sort" "strings" @@ -44,11 +45,14 @@ import ( // | // pipeline-task-4 func TestDAGPipelineRun(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) // Create the Task that echoes text repoTaskResource := v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ @@ -74,7 +78,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.TaskClient.Create(echoTask); err != nil { + if _, err := c.TaskClient.Create(ctx, echoTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create echo Task: %s", err) } @@ -89,7 +93,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineResourceClient.Create(repoResource); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, repoResource, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create simple repo PipelineResource: %s", err) } @@ -178,7 +182,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag-pipeline: %s", err) } pipelineRun := &v1beta1.PipelineRun{ @@ -191,21 +195,21 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag-pipeline-run PipelineRun: %s", err) } t.Logf("Waiting for DAG pipeline to complete") - if err := WaitForPipelineRunState(c, "dag-pipeline-run", pipelineRunTimeout, PipelineRunSucceed("dag-pipeline-run"), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "dag-pipeline-run", pipelineRunTimeout, PipelineRunSucceed("dag-pipeline-run"), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun to finish: %s", err) } - verifyExpectedOrder(t, c.TaskRunClient) + verifyExpectedOrder(ctx, t, c.TaskRunClient) } -func verifyExpectedOrder(t *testing.T, c clientset.TaskRunInterface) { +func verifyExpectedOrder(ctx context.Context, t *testing.T, c clientset.TaskRunInterface) { t.Logf("Verifying order of execution") - taskRunsResp, err := c.List(metav1.ListOptions{}) + taskRunsResp, err := c.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("Couldn't get TaskRuns (so that we could check when they executed): %v", err) } diff --git a/test/duplicate_test.go b/test/duplicate_test.go index fd19ba9d183..70682860e7b 100644 --- a/test/duplicate_test.go +++ b/test/duplicate_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "sync" "testing" @@ -32,11 +33,14 @@ import ( // TestDuplicatePodTaskRun creates 10 builds and checks that each of them has only one build pod. 
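// The controller-test seeding shown above follows the same rule as the e2e tests:
// the context used to create objects in the fake clientsets is the one later used to
// read them back (surfaced to the reconciler tests as testAssets.Ctx). A minimal
// sketch against the test/controller.go helpers; the test name, TaskRun literal and
// assumed []*v1beta1.TaskRun field type are illustrative, not part of this change.
func TestSeededTaskRunReadback(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	taskRun := &v1beta1.TaskRun{ObjectMeta: metav1.ObjectMeta{Name: "tr", Namespace: "ns"}}
	clients, _ := SeedTestData(t, ctx, Data{TaskRuns: []*v1beta1.TaskRun{taskRun}})

	// Read the seeded object back with the same context that created it.
	if _, err := clients.Pipeline.TektonV1beta1().TaskRuns("ns").Get(ctx, "tr", metav1.GetOptions{}); err != nil {
		t.Fatalf("getting seeded TaskRun: %v", err)
	}
}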
func TestDuplicatePodTaskRun(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) var wg sync.WaitGroup for i := 0; i < 25; i++ { @@ -56,18 +60,18 @@ func TestDuplicatePodTaskRun(t *testing.T) { }, }, } - if _, err := c.TaskRunClient.Create(taskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Error creating taskrun: %v", err) } go func(t *testing.T) { defer wg.Done() - if err := WaitForTaskRunState(c, taskrunName, TaskRunSucceed(taskrunName), "TaskRunDuplicatePodTaskRunFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskrunName, TaskRunSucceed(taskrunName), "TaskRunDuplicatePodTaskRunFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) return } - pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", pipeline.GroupName+pipeline.TaskRunLabelKey, taskrunName), }) if err != nil { diff --git a/test/embed_test.go b/test/embed_test.go index 310ad4c1332..b7f9df505c5 100644 --- a/test/embed_test.go +++ b/test/embed_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "testing" @@ -41,22 +42,25 @@ const ( // TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be // executed with an embedded resource spec. 
func TestTaskRun_EmbeddedResource(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - if _, err := c.TaskClient.Create(getEmbeddedTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)})); err != nil { + if _, err := c.TaskClient.Create(ctx, getEmbeddedTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)}), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", embedTaskName, err) } - if _, err := c.TaskRunClient.Create(getEmbeddedTaskRun(namespace)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getEmbeddedTaskRun(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", embedTaskRunName, err) } t.Logf("Waiting for TaskRun %s in namespace %s to complete", embedTaskRunName, namespace) - if err := WaitForTaskRunState(c, embedTaskRunName, TaskRunSucceed(embedTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, embedTaskRunName, TaskRunSucceed(embedTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", embedTaskRunName, err) } diff --git a/test/entrypoint_test.go b/test/entrypoint_test.go index 9af518bfb17..1eede3a846e 100644 --- a/test/entrypoint_test.go +++ b/test/entrypoint_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -34,14 +35,17 @@ const epTaskRunName = "ep-task-run" // that doesn't have a cmd defined. 
In addition to making sure the steps // are executed in the order specified func TestEntrypointRunningStepsInOrder(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating TaskRun in namespace %s", namespace) - if _, err := c.TaskRunClient.Create(&v1beta1.TaskRun{ + if _, err := c.TaskRunClient.Create(ctx, &v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Name: epTaskRunName, Namespace: namespace}, Spec: v1beta1.TaskRunSpec{ TaskSpec: &v1beta1.TaskSpec{ @@ -54,12 +58,12 @@ func TestEntrypointRunningStepsInOrder(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, epTaskRunName, TaskRunSucceed(epTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, epTaskRunName, TaskRunSucceed(epTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } diff --git a/test/examples_test.go b/test/examples_test.go index 926394bf357..6cbc2b9a41c 100644 --- a/test/examples_test.go +++ b/test/examples_test.go @@ -20,6 +20,7 @@ package test import ( "bytes" + "context" "errors" "io/ioutil" "os" @@ -52,15 +53,15 @@ func getCreatedTektonCRD(input []byte, kind string) (string, error) { return string(submatch[1]), nil } -func waitValidatePipelineRunDone(t *testing.T, c *clients, pipelineRunName string) { - if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Succeed(pipelineRunName), pipelineRunName); err != nil { +func waitValidatePipelineRunDone(ctx context.Context, t *testing.T, c *clients, pipelineRunName string) { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, pipelineRunTimeout, Succeed(pipelineRunName), pipelineRunName); err != nil { t.Fatalf("Failed waiting for pipeline run done: %v", err) } } -func waitValidateTaskRunDone(t *testing.T, c *clients, taskRunName string) { +func waitValidateTaskRunDone(ctx context.Context, t *testing.T, c *clients, taskRunName string) { // Per test basis - if err := WaitForTaskRunState(c, taskRunName, Succeed(taskRunName), taskRunName); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, Succeed(taskRunName), taskRunName); err != nil { t.Fatalf("Failed waiting for task run done: %v", err) } } @@ -101,25 +102,28 @@ func koCreate(input []byte, namespace string) ([]byte, error) { // clientset. Test state is used for logging. 
deleteClusterTask does not wait // for the clustertask to be deleted, so it is still possible to have name // conflicts during test -func deleteClusterTask(t *testing.T, c *clients, name string) { +func deleteClusterTask(ctx context.Context, t *testing.T, c *clients, name string) { t.Logf("Deleting clustertask %s", name) - if err := c.ClusterTaskClient.Delete(name, &metav1.DeleteOptions{}); err != nil { + if err := c.ClusterTaskClient.Delete(ctx, name, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete clustertask: %v", err) } } -type waitFunc func(t *testing.T, c *clients, name string) +type waitFunc func(ctx context.Context, t *testing.T, c *clients, name string) func exampleTest(path string, waitValidateFunc waitFunc, kind string) func(t *testing.T) { return func(t *testing.T) { t.Parallel() + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Setup unique namespaces for each test so they can run in complete // isolation - c, namespace := setup(t) + c, namespace := setup(ctx, t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) inputExample, err := ioutil.ReadFile(path) if err != nil { @@ -151,13 +155,13 @@ func exampleTest(path string, waitValidateFunc waitFunc, kind string) func(t *te // be cleaned up clustertask, err := getCreatedTektonCRD(out, "clustertask") if clustertask != "" { - knativetest.CleanupOnInterrupt(func() { deleteClusterTask(t, c, clustertask) }, t.Logf) - defer deleteClusterTask(t, c, clustertask) + knativetest.CleanupOnInterrupt(func() { deleteClusterTask(ctx, t, c, clustertask) }, t.Logf) + defer deleteClusterTask(ctx, t, c, clustertask) } else if err != nil { t.Fatalf("Failed to get created clustertask: %v", err) } - waitValidateFunc(t, c, name) + waitValidateFunc(ctx, t, c, name) } } diff --git a/test/git_checkout_test.go b/test/git_checkout_test.go index ac160446039..be2a7e4284b 100644 --- a/test/git_checkout_test.go +++ b/test/git_checkout_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "strings" "testing" @@ -97,12 +98,15 @@ func TestGitPipelineRun(t *testing.T) { }} { t.Run(tc.name, func(t *testing.T) { t.Parallel() - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) - if _, err := c.PipelineResourceClient.Create(&v1alpha1.PipelineResource{ + if _, err := c.PipelineResourceClient.Create(ctx, &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{Name: gitSourceResourceName}, Spec: v1alpha1.PipelineResourceSpec{ Type: v1alpha1.PipelineResourceTypeGit, @@ -113,12 +117,12 @@ func TestGitPipelineRun(t *testing.T) { {Name: "sslVerify", Value: tc.sslVerify}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) } t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) - if _, err := c.PipelineRunClient.Create(&v1beta1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineRunName}, Spec: v1beta1.PipelineRunSpec{ Resources: []v1beta1.PipelineResourceBinding{{ @@ -151,11 +155,11 @@ func TestGitPipelineRun(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %s", gitTestPipelineRunName, err) } - if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + if err := WaitForPipelineRunState(ctx, c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) t.Fatalf("PipelineRun execution failed") } @@ -180,12 +184,15 @@ func TestGitPipelineRunFail(t *testing.T) { }} { t.Run(tc.name, func(t *testing.T) { t.Parallel() - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) - if _, err := c.PipelineResourceClient.Create(&v1alpha1.PipelineResource{ + if _, err := c.PipelineResourceClient.Create(ctx, &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{Name: gitSourceResourceName}, Spec: v1alpha1.PipelineResourceSpec{ Type: v1alpha1.PipelineResourceTypeGit, @@ -195,12 +202,12 @@ func TestGitPipelineRunFail(t *testing.T) { {Name: "httpsProxy", Value: tc.httpsproxy}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) } t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) - if _, err := c.PipelineRunClient.Create(&v1beta1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineRunName}, Spec: 
v1beta1.PipelineRunSpec{ Resources: []v1beta1.PipelineResourceBinding{{ @@ -233,18 +240,18 @@ func TestGitPipelineRunFail(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %s", gitTestPipelineRunName, err) } - if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { - taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err := WaitForPipelineRunState(ctx, c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + taskruns, err := c.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error getting TaskRun list for PipelineRun %s %s", gitTestPipelineRunName, err) } for _, tr := range taskruns.Items { if tr.Status.PodName != "" { - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } @@ -253,7 +260,7 @@ func TestGitPipelineRunFail(t *testing.T) { if strings.HasPrefix(stat.Name, "step-git-source-"+gitSourceResourceName) { if stat.State.Terminated != nil { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 036b3072201..eee2521c352 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "fmt" "testing" @@ -52,56 +53,59 @@ var ( // and then using helm to deploy it func TestHelmDeployPipelineRun(t *testing.T) { repo := ensureDockerRepo(t) - c, namespace := setup(t) - setupClusterBindingForHelm(c, t, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + setupClusterBindingForHelm(ctx, c, t, namespace) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", sourceResourceName) - if _, err := c.PipelineResourceClient.Create(getGoHelloworldGitResource()); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getGoHelloworldGitResource(), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceResourceName, err) } t.Logf("Creating Image PipelineResource %s", sourceImageName) - if _, err := c.PipelineResourceClient.Create(getHelmImageResource(repo)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getHelmImageResource(repo), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceImageName, err) } t.Logf("Creating Task %s", createImageTaskName) - if _, err := c.TaskClient.Create(getCreateImageTask(namespace)); err != nil { + if _, err := c.TaskClient.Create(ctx, getCreateImageTask(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", createImageTaskName, err) } t.Logf("Creating Task %s", helmDeployTaskName) - if _, err := c.TaskClient.Create(getHelmDeployTask(namespace)); err != nil { + if _, err := c.TaskClient.Create(ctx, getHelmDeployTask(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", helmDeployTaskName, err) } t.Logf("Creating Task %s", checkServiceTaskName) - if _, err := c.TaskClient.Create(getCheckServiceTask(namespace)); err != nil { + if _, err := c.TaskClient.Create(ctx, getCheckServiceTask(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", checkServiceTaskName, err) } t.Logf("Creating Pipeline %s", helmDeployPipelineName) - if _, err := c.PipelineClient.Create(getHelmDeployPipeline(namespace)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getHelmDeployPipeline(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineName, err) } t.Logf("Creating PipelineRun %s", helmDeployPipelineRunName) - if _, err := c.PipelineRunClient.Create(getHelmDeployPipelineRun(namespace)); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, getHelmDeployPipelineRun(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineRunName, err) } // Verify status of PipelineRun (wait for it) - if err := WaitForPipelineRunState(c, helmDeployPipelineRunName, timeout, PipelineRunSucceed(helmDeployPipelineRunName), "PipelineRunCompleted"); err != nil { + if err := WaitForPipelineRunState(ctx, c, helmDeployPipelineRunName, timeout, PipelineRunSucceed(helmDeployPipelineRunName), "PipelineRunCompleted"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(") } // cleanup task 
to remove helm releases from cluster and cluster role bindings, will not fail the test if it fails, just log - knativetest.CleanupOnInterrupt(func() { helmCleanup(c, t, namespace) }, t.Logf) - defer helmCleanup(c, t, namespace) + knativetest.CleanupOnInterrupt(func() { helmCleanup(ctx, c, t, namespace) }, t.Logf) + defer helmCleanup(ctx, c, t, namespace) } func getGoHelloworldGitResource() *v1alpha1.PipelineResource { @@ -279,7 +283,7 @@ func getHelmDeployPipelineRun(namespace string) *v1beta1.PipelineRun { } } -func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { +func setupClusterBindingForHelm(ctx context.Context, c *clients, t *testing.T, namespace string) { clusterRoleBindings[0] = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("default-tiller"), @@ -298,26 +302,26 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { for _, crb := range clusterRoleBindings { t.Logf("Creating Cluster Role binding %s for helm", crb.Name) - if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(crb); err != nil { + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create cluster role binding for Helm %s", err) } } } -func helmCleanup(c *clients, t *testing.T, namespace string) { +func helmCleanup(ctx context.Context, c *clients, t *testing.T, namespace string) { t.Logf("Cleaning up helm from cluster...") - removeAllHelmReleases(c, t, namespace) + removeAllHelmReleases(ctx, c, t, namespace) for _, crb := range clusterRoleBindings { t.Logf("Deleting Cluster Role binding %s for helm", crb.Name) - if err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Delete(crb.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete cluster role binding for Helm %s", err) } } } -func removeAllHelmReleases(c *clients, t *testing.T, namespace string) { +func removeAllHelmReleases(ctx context.Context, c *clients, t *testing.T, namespace string) { helmRemoveAllTaskName := "helm-remove-all-task" helmRemoveAllTask := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: helmRemoveAllTaskName, Namespace: namespace}, @@ -340,17 +344,17 @@ func removeAllHelmReleases(c *clients, t *testing.T, namespace string) { } t.Logf("Creating Task %s", helmRemoveAllTaskName) - if _, err := c.TaskClient.Create(helmRemoveAllTask); err != nil { + if _, err := c.TaskClient.Create(ctx, helmRemoveAllTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", helmRemoveAllTaskName, err) } t.Logf("Creating TaskRun %s", helmRemoveAllTaskRunName) - if _, err := c.TaskRunClient.Create(helmRemoveAllTaskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, helmRemoveAllTaskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", helmRemoveAllTaskRunName, err) } t.Logf("Waiting for TaskRun %s in namespace %s to complete", helmRemoveAllTaskRunName, namespace) - if err := WaitForTaskRunState(c, helmRemoveAllTaskRunName, TaskRunSucceed(helmRemoveAllTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, helmRemoveAllTaskRunName, TaskRunSucceed(helmRemoveAllTaskRunName), "TaskRunSuccess"); err != nil { t.Logf("TaskRun %s failed to finish: %s", helmRemoveAllTaskRunName, err) } } diff --git 
a/test/init_test.go b/test/init_test.go index c1658cc2617..dacf4dfeedb 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -21,6 +21,7 @@ limitations under the License. package test import ( + "context" "flag" "fmt" "os" @@ -47,7 +48,7 @@ func init() { flag.BoolVar(&skipRootUserTests, "skipRootUserTests", false, "Skip tests that require root user") } -func setup(t *testing.T, fn ...func(*testing.T, *clients, string)) (*clients, string) { +func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) { skipIfExcluded(t) t.Helper() @@ -56,11 +57,11 @@ func setup(t *testing.T, fn ...func(*testing.T, *clients, string)) (*clients, st initializeLogsAndMetrics(t) c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) - createNamespace(t, namespace, c.KubeClient) - verifyServiceAccountExistence(t, namespace, c.KubeClient) + createNamespace(ctx, t, namespace, c.KubeClient) + verifyServiceAccountExistence(ctx, t, namespace, c.KubeClient) for _, f := range fn { - f(t, c, namespace) + f(ctx, t, c, namespace) } return c, namespace @@ -76,34 +77,34 @@ func header(logf logging.FormatLogger, text string) { logf(bar) } -func tearDown(t *testing.T, cs *clients, namespace string) { +func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) { t.Helper() if cs.KubeClient == nil { return } if t.Failed() { header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace)) - bs, err := getCRDYaml(cs, namespace) + bs, err := getCRDYaml(ctx, cs, namespace) if err != nil { t.Error(err) } else { t.Log(string(bs)) } header(t.Logf, fmt.Sprintf("Dumping logs from Pods in the %s", namespace)) - taskruns, err := cs.TaskRunClient.List(metav1.ListOptions{}) + taskruns, err := cs.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error getting TaskRun list %s", err) } for _, tr := range taskruns.Items { if tr.Status.PodName != "" { - CollectPodLogs(cs, tr.Status.PodName, namespace, t.Logf) + CollectPodLogs(ctx, cs, tr.Status.PodName, namespace, t.Logf) } } } if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() { t.Logf("Deleting namespace %s", namespace) - if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { + if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil { t.Errorf("Failed to delete namespace %s: %s", namespace, err) } } @@ -121,27 +122,27 @@ func initializeLogsAndMetrics(t *testing.T) { }) } -func createNamespace(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { +func createNamespace(ctx context.Context, t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { t.Logf("Create namespace %s to deploy to", namespace) labels := map[string]string{ "tekton.dev/test-e2e": "true", } - if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(&corev1.Namespace{ + if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, Labels: labels, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err) } } -func verifyServiceAccountExistence(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { +func verifyServiceAccountExistence(ctx context.Context, t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { defaultSA := "default" t.Logf("Verify SA %q is created in namespace 
%q", defaultSA, namespace) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(defaultSA, metav1.GetOptions{}) + _, err := kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(ctx, defaultSA, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return false, nil } @@ -160,7 +161,7 @@ func TestMain(m *testing.M) { os.Exit(c) } -func getCRDYaml(cs *clients, ns string) ([]byte, error) { +func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) { var output []byte printOrAdd := func(i interface{}) { bs, err := yaml.Marshal(i) @@ -171,7 +172,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { output = append(output, bs...) } - ps, err := cs.PipelineClient.List(metav1.ListOptions{}) + ps, err := cs.PipelineClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipeline: %w", err) } @@ -179,7 +180,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - prs, err := cs.PipelineResourceClient.List(metav1.ListOptions{}) + prs, err := cs.PipelineResourceClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipelinerun resource: %w", err) } @@ -187,7 +188,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - prrs, err := cs.PipelineRunClient.List(metav1.ListOptions{}) + prrs, err := cs.PipelineRunClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipelinerun: %w", err) } @@ -195,14 +196,14 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - ts, err := cs.TaskClient.List(metav1.ListOptions{}) + ts, err := cs.TaskClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get tasks: %w", err) } for _, i := range ts.Items { printOrAdd(i) } - trs, err := cs.TaskRunClient.List(metav1.ListOptions{}) + trs, err := cs.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get taskrun: %w", err) } @@ -210,7 +211,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pods: %w", err) } diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go index 651c84475c9..f88f8b5bbad 100644 --- a/test/kaniko_task_test.go +++ b/test/kaniko_task_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "fmt" "strings" "testing" @@ -45,45 +46,49 @@ const ( // TestTaskRun is an integration test that will verify a TaskRun using kaniko func TestKanikoTaskRun(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if skipRootUserTests { t.Skip("Skip test as skipRootUserTests set to true") } - c, namespace := setup(t, withRegistry) + c, namespace := setup(ctx, t, withRegistry) t.Parallel() repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", kanikoGitResourceName) - if _, err := c.PipelineResourceClient.Create(getGitResource()); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getGitResource(), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } t.Logf("Creating Image PipelineResource %s", repo) - if _, err := c.PipelineResourceClient.Create(getImageResource(repo)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getImageResource(repo), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } t.Logf("Creating Task %s", kanikoTaskName) - if _, err := c.TaskClient.Create(getTask(repo, namespace)); err != nil { + if _, err := c.TaskClient.Create(ctx, getTask(repo, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", kanikoTaskName, err) } t.Logf("Creating TaskRun %s", kanikoTaskRunName) - if _, err := c.TaskRunClient.Create(getTaskRun(namespace)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getTaskRun(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", kanikoTaskRunName, err) } // Verify status of TaskRun (wait for it) - if err := WaitForTaskRunState(c, kanikoTaskRunName, Succeed(kanikoTaskRunName), "TaskRunCompleted"); err != nil { + if err := WaitForTaskRunState(ctx, c, kanikoTaskRunName, Succeed(kanikoTaskRunName), "TaskRunCompleted"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", kanikoTaskRunName, err) } - tr, err := c.TaskRunClient.Get(kanikoTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, kanikoTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } @@ -206,7 +211,10 @@ func getTaskRun(namespace string) *v1beta1.TaskRun { func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, error) { t.Helper() podName := "skopeo-jq" - if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(&corev1.Pod{ + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(ctx, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: podName, @@ -220,15 +228,15 @@ func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, }}, RestartPolicy: corev1.RestartPolicyNever, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the skopeo-jq pod: %v", err) } - if err := WaitForPodState(c, podName, namespace, func(pod *corev1.Pod) (bool, error) { + if err := WaitForPodState(ctx, c, podName, namespace, func(pod 
*corev1.Pod) (bool, error) { return pod.Status.Phase == "Succeeded" || pod.Status.Phase == "Failed", nil }, "PodContainersTerminated"); err != nil { t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err) } - logs, err := getContainerLogsFromPod(c.KubeClient.Kube, podName, "skopeo", namespace) + logs, err := getContainerLogsFromPod(ctx, c.KubeClient.Kube, podName, "skopeo", namespace) if err != nil { t.Fatalf("Could not get logs for pod %s: %s", podName, err) } diff --git a/test/pipelinefinally_test.go b/test/pipelinefinally_test.go index c6cda756d33..4dd6daf0412 100644 --- a/test/pipelinefinally_test.go +++ b/test/pipelinefinally_test.go @@ -16,6 +16,7 @@ limitations under the License. package test import ( + "context" "strings" "testing" @@ -32,27 +33,30 @@ import ( ) func TestPipelineLevelFinally_OneDAGTaskFailed_Failure(t *testing.T) { - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) cond := getCondition("failedcondition", namespace) - if _, err := c.ConditionClient.Create(cond); err != nil { + if _, err := c.ConditionClient.Create(ctx, cond, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) } task := getFailTask("failtask", namespace) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag Task: %s", err) } delayedTask := getDelaySuccessTask("delayed-task", namespace) - if _, err := c.TaskClient.Create(delayedTask); err != nil { + if _, err := c.TaskClient.Create(ctx, delayedTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag Task: %s", err) } finalTask := getSuccessTask("successtask", namespace) - if _, err := c.TaskClient.Create(finalTask); err != nil { + if _, err := c.TaskClient.Create(ctx, finalTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create final Task: %s", err) } @@ -71,20 +75,20 @@ func TestPipelineLevelFinally_OneDAGTaskFailed_Failure(t *testing.T) { "finaltask1": "successtask", }, ) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } pipelineRun := getPipelineRun(namespace, "pipelinerun-failed-dag-tasks", "pipeline-failed-dag-tasks") - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Run `%s`: %s", "pipelinerun-failed-dag-tasks", err) } - if err := WaitForPipelineRunState(c, "pipelinerun-failed-dag-tasks", timeout, PipelineRunFailed("pipelinerun-failed-dag-tasks"), "PipelineRunFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "pipelinerun-failed-dag-tasks", timeout, PipelineRunFailed("pipelinerun-failed-dag-tasks"), "PipelineRunFailed"); err != nil { t.Fatalf("Waiting for PipelineRun %s to fail: %v", "pipelinerun-failed-dag-tasks", err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pipelinerun-failed-dag-tasks"}) + taskrunList, err := 
c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pipelinerun-failed-dag-tasks"}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", "pipelinerun-failed-dag-tasks", err) } @@ -99,7 +103,7 @@ func TestPipelineLevelFinally_OneDAGTaskFailed_Failure(t *testing.T) { } dagTask1EndTime = taskrunItem.Status.CompletionTime case strings.HasPrefix(n, "pipelinerun-failed-dag-tasks-dagtask2"): - if err := WaitForTaskRunState(c, n, TaskRunSucceed(n), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, n, TaskRunSucceed(n), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } dagTask2EndTime = taskrunItem.Status.CompletionTime @@ -108,7 +112,7 @@ func TestPipelineLevelFinally_OneDAGTaskFailed_Failure(t *testing.T) { t.Fatalf("TaskRun %s for dag task should have skipped due to condition failure", n) } case strings.HasPrefix(n, "pipelinerun-failed-dag-tasks-finaltask1"): - if err := WaitForTaskRunState(c, n, TaskRunSucceed(n), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, n, TaskRunSucceed(n), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } finalTaskStartTime = taskrunItem.Status.StartTime @@ -123,17 +127,20 @@ func TestPipelineLevelFinally_OneDAGTaskFailed_Failure(t *testing.T) { } func TestPipelineLevelFinally_OneFinalTaskFailed_Failure(t *testing.T) { - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := getSuccessTask("successtask", namespace) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag Task: %s", err) } finalTask := getFailTask("failtask", namespace) - if _, err := c.TaskClient.Create(finalTask); err != nil { + if _, err := c.TaskClient.Create(ctx, finalTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create final Task: %s", err) } @@ -148,21 +155,21 @@ func TestPipelineLevelFinally_OneFinalTaskFailed_Failure(t *testing.T) { "finaltask1": "failtask", }, ) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } pipelineRun := getPipelineRun(namespace, "pipelinerun-failed-final-tasks", "pipeline-failed-final-tasks") - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Run `%s`: %s", "pipelinerun-failed-final-tasks", err) } - if err := WaitForPipelineRunState(c, "pipelinerun-failed-final-tasks", timeout, PipelineRunFailed("pipelinerun-failed-final-tasks"), "PipelineRunFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "pipelinerun-failed-final-tasks", timeout, PipelineRunFailed("pipelinerun-failed-final-tasks"), "PipelineRunFailed"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", "pipelinerun-failed-final-tasks", err) t.Fatalf("PipelineRun execution failed") } - taskrunList, err := 
c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pipelinerun-failed-final-tasks"}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pipelinerun-failed-final-tasks"}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", "pipelinerun-failed-final-tasks", err) } diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 51127de33da..a684432ac86 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "encoding/base64" "fmt" "strings" @@ -56,7 +57,7 @@ func TestPipelineRun(t *testing.T) { t.Parallel() type tests struct { name string - testSetup func(t *testing.T, c *clients, namespace string, index int) + testSetup func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) expectedTaskRuns []string expectedNumberOfEvents int pipelineRunFunc func(int, string) *v1beta1.PipelineRun @@ -64,21 +65,21 @@ func TestPipelineRun(t *testing.T) { tds := []tests{{ name: "fan-in and fan-out", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() for _, task := range getFanInFanOutTasks(namespace) { - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } } for _, res := range getFanInFanOutGitResources() { - if _, err := c.PipelineResourceClient.Create(res); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, res, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } } - if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getFanInFanOutPipeline(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -88,13 +89,13 @@ func TestPipelineRun(t *testing.T) { expectedNumberOfEvents: 5, }, { name: "service account propagation and pipeline param", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getPipelineRunSecret(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err) } - if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(ctx, getPipelineRunServiceAccount(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) } @@ -116,11 +117,11 @@ func TestPipelineRun(t *testing.T) { }, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } - if _, err := 
c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -130,10 +131,10 @@ func TestPipelineRun(t *testing.T) { pipelineRunFunc: getHelloWorldPipelineRun, }, { name: "pipeline succeeds when task skipped due to failed condition", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() cond := getFailingCondition() - if _, err := c.ConditionClient.Create(cond); err != nil { + if _, err := c.ConditionClient.Create(ctx, cond, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) } @@ -147,10 +148,10 @@ func TestPipelineRun(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } - if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index, namespace)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getPipelineWithFailingCondition(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -160,17 +161,17 @@ func TestPipelineRun(t *testing.T) { pipelineRunFunc: getConditionalPipelineRun, }, { name: "pipelinerun succeeds with LimitRange minimum in namespace", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() - if _, err := c.KubeClient.Kube.CoreV1().LimitRanges(namespace).Create(getLimitRange("prlimitrange", namespace, "100m", "99Mi", "100m")); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().LimitRanges(namespace).Create(ctx, getLimitRange("prlimitrange", namespace, "100m", "99Mi", "100m"), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create LimitRange `%s`: %s", "prlimitrange", err) } - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getPipelineRunSecret(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err) } - if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(ctx, getPipelineRunServiceAccount(index, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) } @@ -192,10 +193,10 @@ func TestPipelineRun(t *testing.T) { }, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", fmt.Sprint("task", index), err) } - if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(index, namespace), metav1.CreateOptions{}); err != nil 
{ t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -210,27 +211,30 @@ func TestPipelineRun(t *testing.T) { td := td t.Parallel() - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) - td.testSetup(t, c, namespace, i) + td.testSetup(ctx, t, c, namespace, i) prName := fmt.Sprintf("%s%d", pipelineRunName, i) - pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace)) + pipelineRun, err := c.PipelineRunClient.Create(ctx, td.pipelineRunFunc(i, namespace), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace) - if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns) - actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) + actualTaskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) } @@ -244,7 +248,7 @@ func TestPipelineRun(t *testing.T) { } } expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) - r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + r, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } @@ -253,16 +257,16 @@ func TestPipelineRun(t *testing.T) { } t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name) - checkLabelPropagation(t, c, namespace, prName, r) + checkLabelPropagation(ctx, t, c, namespace, prName, r) t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name) - checkAnnotationPropagation(t, c, namespace, prName, r) + checkAnnotationPropagation(ctx, t, c, namespace, prName, r) } matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames} t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, matchKinds) - events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded") + events, err := collectMatchingEvents(ctx, c.KubeClient, namespace, matchKinds, "Succeeded") if err != nil { t.Fatalf("Failed to collect matching events: %q", err) } @@ -281,7 +285,7 @@ func TestPipelineRun(t *testing.T) { // the PersistentVolumeClaims has the DeletionTimestamp if err := wait.PollImmediate(interval, timeout, func() (bool, error) { // Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run. 
- pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) + pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) if errWait != nil && !errors.IsNotFound(errWait) { return true, fmt.Errorf("error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait) } @@ -554,10 +558,10 @@ func getName(namespace string, suffix int) string { // collectMatchingEvents collects list of events under 5 seconds that match // 1. matchKinds which is a map of Kind of Object with name of objects // 2. reason which is the expected reason of event -func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { +func collectMatchingEvents(ctx context.Context, kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { var events []*corev1.Event - watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{}) + watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{}) // close watchEvents channel defer watchEvents.Stop() if err != nil { @@ -586,17 +590,17 @@ func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, // checkLabelPropagation checks that labels are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { +func checkLabelPropagation(ctx context.Context, t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { // Our controllers add 4 labels automatically. If custom labels are set on // the Pipeline, PipelineRun, or Task then the map will have to be resized. labels := make(map[string]string, 4) // Check label propagation to PipelineRuns. - pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) } - p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + p, err := c.PipelineClient.Get(ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) } @@ -614,7 +618,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // This label is added to every TaskRun by the PipelineRun controller labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + task, err := c.TaskClient.Get(ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) } @@ -630,7 +634,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // This label is added to every Pod by the TaskRun controller if tr.Status.PodName != "" { // Check label propagation to Pods. 
- pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + pod := getPodForTaskRun(ctx, t, c.KubeClient, namespace, tr) // This label is added to every Pod by the TaskRun controller labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name assertLabelsMatch(t, labels, pod.ObjectMeta.Labels) @@ -639,15 +643,15 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // checkAnnotationPropagation checks that annotations are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { +func checkAnnotationPropagation(ctx context.Context, t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { annotations := make(map[string]string) // Check annotation propagation to PipelineRuns. - pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) } - p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + p, err := c.PipelineClient.Get(ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) } @@ -661,7 +665,7 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe annotations[key] = val } if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + task, err := c.TaskClient.Get(ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) } @@ -672,13 +676,13 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations) // Check annotation propagation to Pods. - pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + pod := getPodForTaskRun(ctx, t, c.KubeClient, namespace, tr) assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) } -func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1beta1.TaskRun) *corev1.Pod { +func getPodForTaskRun(ctx context.Context, t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1beta1.TaskRun) *corev1.Pod { // The Pod name has a random suffix, so we filter by label to find the one we care about. - pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, }) if err != nil { diff --git a/test/registry_test.go b/test/registry_test.go index a5215732468..1a9129e8039 100644 --- a/test/registry_test.go +++ b/test/registry_test.go @@ -18,6 +18,7 @@ limitations under the License. 
package test import ( + "context" "testing" appsv1 "k8s.io/api/apps/v1" @@ -25,12 +26,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func withRegistry(t *testing.T, c *clients, namespace string) { +func withRegistry(ctx context.Context, t *testing.T, c *clients, namespace string) { deployment := getRegistryDeployment(namespace) - if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(deployment); err != nil { + if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the local registry deployment: %v", err) } - if err := WaitForDeploymentState(c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { + if err := WaitForDeploymentState(ctx, c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { var replicas int32 = 1 if d.Spec.Replicas != nil { replicas = *d.Spec.Replicas @@ -41,7 +42,7 @@ func withRegistry(t *testing.T, c *clients, namespace string) { } service := getRegistryService(namespace) - if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(service); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the local registry service: %v", err) } } diff --git a/test/retry_test.go b/test/retry_test.go index 8a51d305a2a..ce6625d831f 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" "time" @@ -33,15 +34,18 @@ import ( // TestTaskRunRetry tests that retries behave as expected, by creating multiple // Pods for the same TaskRun each time it fails, up to the configured max. func TestTaskRunRetry(t *testing.T) { - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) // Create a PipelineRun with a single TaskRun that can only fail, // configured to retry 5 times. pipelineRunName := "retry-pipeline" numRetries := 5 - if _, err := c.PipelineRunClient.Create(&v1beta1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, Spec: v1beta1.PipelineRunSpec{ PipelineSpec: &v1beta1.PipelineSpec{ @@ -57,17 +61,17 @@ func TestTaskRunRetry(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err) } // Wait for the PipelineRun to fail, when retries are exhausted. - if err := WaitForPipelineRunState(c, pipelineRunName, 5*time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, 5*time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil { t.Fatalf("Waiting for PipelineRun to fail: %v", err) } // Get the status of the PipelineRun. 
- pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err) } @@ -83,7 +87,7 @@ func TestTaskRunRetry(t *testing.T) { } // There should only be one TaskRun created. - trs, err := c.TaskRunClient.List(metav1.ListOptions{}) + trs, err := c.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Failed to list TaskRuns: %v", err) } else if len(trs.Items) != 1 { @@ -105,7 +109,7 @@ func TestTaskRunRetry(t *testing.T) { } // There should be N Pods created, all failed, all owned by the TaskRun. - pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) // We expect N+1 Pods total, one for each failed and retried attempt, and one for the final attempt. wantPods := numRetries + 1 diff --git a/test/secret.go b/test/secret.go index 37ea00c080a..9428ebbf919 100644 --- a/test/secret.go +++ b/test/secret.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "io/ioutil" "os" @@ -35,6 +36,9 @@ import ( // otherwise. func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, namespace string, secretName string) (bool, error) { t.Helper() + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() file := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") if file == "" { t.Logf("Not creating service account secret, relying on default credentials in namespace %s.", namespace) @@ -56,7 +60,7 @@ func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, name sec.Data = map[string][]byte{ "config.json": bs, } - _, err = c.Kube.CoreV1().Secrets(namespace).Create(sec) + _, err = c.Kube.CoreV1().Secrets(namespace).Create(ctx, sec, metav1.CreateOptions{}) t.Log("Creating service account secret") return true, err diff --git a/test/sidecar_test.go b/test/sidecar_test.go index 86d0788e1a8..77e6f87e84e 100644 --- a/test/sidecar_test.go +++ b/test/sidecar_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "fmt" "testing" "time" @@ -54,11 +55,14 @@ func TestSidecarTaskSupport(t *testing.T) { sidecarCommand: []string{"echo", "\"hello from sidecar\""}, }} - clients, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + clients, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, clients, namespace) }, t.Logf) - defer tearDown(t, clients, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, clients, namespace) }, t.Logf) + defer tearDown(ctx, t, clients, namespace) for i, test := range tests { t.Run(test.desc, func(t *testing.T) { @@ -85,26 +89,26 @@ func TestSidecarTaskSupport(t *testing.T) { } t.Logf("Creating Task %q", sidecarTaskName) - if _, err := clients.TaskClient.Create(task); err != nil { + if _, err := clients.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create Task %q: %v", sidecarTaskName, err) } t.Logf("Creating TaskRun %q", sidecarTaskRunName) - if _, err := clients.TaskRunClient.Create(taskRun); err != nil { + if _, err := clients.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create TaskRun %q: %v", sidecarTaskRunName, err) } - if err := WaitForTaskRunState(clients, sidecarTaskRunName, Succeed(sidecarTaskRunName), "TaskRunSucceed"); err != nil { + if err := WaitForTaskRunState(ctx, clients, sidecarTaskRunName, Succeed(sidecarTaskRunName), "TaskRunSucceed"); err != nil { t.Errorf("Error waiting for TaskRun %q to finish: %v", sidecarTaskRunName, err) } - tr, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + tr, err := clients.TaskRunClient.Get(ctx, sidecarTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting Taskrun: %v", err) } podName := tr.Status.PodName - if err := WaitForPodState(clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { + if err := WaitForPodState(ctx, clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { terminatedCount := 0 for _, c := range pod.Status.ContainerStatuses { if c.State.Terminated != nil { @@ -116,7 +120,7 @@ func TestSidecarTaskSupport(t *testing.T) { t.Errorf("Error waiting for Pod %q to terminate both the primary and sidecar containers: %v", podName, err) } - pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting TaskRun pod: %v", err) } @@ -145,7 +149,7 @@ func TestSidecarTaskSupport(t *testing.T) { t.Errorf("Either the primary or sidecar containers did not terminate") } - trCheckSidecarStatus, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + trCheckSidecarStatus, err := clients.TaskRunClient.Get(ctx, sidecarTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting TaskRun: %v", err) } diff --git a/test/start_time_test.go b/test/start_time_test.go index 9657c66eec3..baecd57e1a4 100644 --- a/test/start_time_test.go +++ b/test/start_time_test.go @@ -15,6 +15,7 @@ limitations under the License. package test import ( + "context" "testing" "time" @@ -31,12 +32,15 @@ import ( // Scheduling and reporting specifics can result in start times being reported // more than 10s apart, but they shouldn't be less than 10s apart. 
func TestStartTime(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating TaskRun in namespace %q", namespace) - tr, err := c.TaskRunClient.Create(&v1beta1.TaskRun{ + tr, err := c.TaskRunClient.Create(ctx, &v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "start-time-test-", Namespace: namespace, @@ -61,16 +65,16 @@ func TestStartTime(t *testing.T) { }}, }, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating TaskRun: %v", err) } t.Logf("Created TaskRun %q in namespace %q", tr.Name, namespace) // Wait for the TaskRun to complete. - if err := WaitForTaskRunState(c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } - tr, err = c.TaskRunClient.Get(tr.Name, metav1.GetOptions{}) + tr, err = c.TaskRunClient.Get(ctx, tr.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting TaskRun: %v", err) } diff --git a/test/status_test.go b/test/status_test.go index 8382d2c0bde..a7c3baae01f 100644 --- a/test/status_test.go +++ b/test/status_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -31,11 +32,14 @@ import ( // verify a very simple "hello world" TaskRun and PipelineRun failure // execution lead to the correct TaskRun status. 
func TestTaskRunPipelineRunStatus(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) task := &v1beta1.Task{ @@ -47,7 +51,7 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := &v1beta1.TaskRun{ @@ -57,12 +61,12 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { ServiceAccountName: "inexistent", }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } @@ -82,15 +86,15 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { ServiceAccountName: "inexistent", }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) } t.Logf("Waiting for PipelineRun in namespace %s to fail", namespace) - if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } } diff --git a/test/taskrun_test.go b/test/taskrun_test.go index 5158167ce98..34b87b8fab4 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "strings" "testing" @@ -31,11 +32,15 @@ import ( ) func TestTaskRunFailure(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) taskRunName := "failing-taskrun" @@ -58,7 +63,7 @@ func TestTaskRunFailure(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := &v1beta1.TaskRun{ @@ -67,16 +72,16 @@ func TestTaskRunFailure(t *testing.T) { TaskRef: &v1beta1.TaskRef{Name: "failing-task"}, }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } - taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } @@ -117,11 +122,14 @@ func TestTaskRunFailure(t *testing.T) { } func TestTaskRunStatus(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) taskRunName := "status-taskrun" @@ -139,7 +147,7 @@ func TestTaskRunStatus(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := &v1beta1.TaskRun{ @@ -148,16 +156,16 @@ func TestTaskRunStatus(t *testing.T) { TaskRef: &v1beta1.TaskRef{Name: "status-task"}, }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } - taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } diff --git a/test/timeout_test.go b/test/timeout_test.go index de27a4dda6f..2ecdb29af41 100644 --- 
a/test/timeout_test.go +++ b/test/timeout_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "sync" "testing" @@ -36,11 +37,14 @@ import ( // verify that pipelinerun timeout works and leads to the the correct TaskRun statuses // and pod deletions. func TestPipelineRunTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task in namespace %s", namespace) task := &v1beta1.Task{ @@ -53,7 +57,7 @@ func TestPipelineRunTimeout(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "banana", err) } @@ -73,19 +77,19 @@ func TestPipelineRunTimeout(t *testing.T) { Timeout: &metav1.Duration{Duration: 5 * time.Second}, }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) } @@ -96,7 +100,7 @@ func TestPipelineRunTimeout(t *testing.T) { for _, taskrunItem := range taskrunList.Items { go func(name string) { - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning") errChan <- err }(taskrunItem.Name) } @@ -107,12 +111,12 @@ func TestPipelineRunTimeout(t *testing.T) { } } - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason(v1beta1.PipelineRunReasonTimedOut.String(), pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, 
FailedWithReason(v1beta1.PipelineRunReasonTimedOut.String(), pipelineRun.Name), "PipelineRunTimedOut"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) } @@ -122,7 +126,7 @@ func TestPipelineRunTimeout(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() - err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunTimeout", name), "TaskRunTimeout") + err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunTimeout", name), "TaskRunTimeout") if err != nil { t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) } @@ -130,7 +134,7 @@ func TestPipelineRunTimeout(t *testing.T) { } wg.Wait() - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } @@ -151,26 +155,29 @@ func TestPipelineRunTimeout(t *testing.T) { PipelineRef: &v1beta1.PipelineRef{Name: "peppers"}, }, } - if _, err := c.PipelineClient.Create(secondPipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, secondPipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", secondPipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(secondPipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, secondPipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", secondPipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to complete", secondPipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, secondPipelineRun.Name, timeout, PipelineRunSucceed(secondPipelineRun.Name), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, secondPipelineRun.Name, timeout, PipelineRunSucceed(secondPipelineRun.Name), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", secondPipelineRun.Name, err) } } // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out. 
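The Wait* helpers that these hunks now call with a context live elsewhere in the test package and are not shown in this patch. Here is a stdlib-only sketch of how such a helper can honor context cancellation while polling; the condition signature is an assumption, and the real helpers also take the test clients and a resource name.

package example

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls check until it reports done, returns an error, or ctx is
// cancelled. Wrapping ctx with context.WithTimeout bounds the wait, playing
// the role of the timeout argument seen in the hunks above.
func waitFor(ctx context.Context, interval time.Duration, check func() (bool, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("gave up waiting: %w", ctx.Err())
		case <-ticker.C:
		}
	}
}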
func TestTaskRunTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) task := &v1beta1.Task{ @@ -183,7 +190,7 @@ func TestTaskRunTimeout(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "giraffe", err) } taskRun := &v1beta1.TaskRun{ @@ -195,16 +202,16 @@ func TestTaskRunTimeout(t *testing.T) { Timeout: &metav1.Duration{Duration: 30 * time.Second}, }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", taskRun.Name, err) } t.Logf("Waiting for TaskRun %s in namespace %s to complete", "run-giraffe", namespace) - if err := WaitForTaskRunState(c, taskRun.Name, FailedWithReason(v1beta1.TaskRunReasonTimedOut.String(), taskRun.Name), v1beta1.TaskRunReasonTimedOut.String()); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRun.Name, FailedWithReason(v1beta1.TaskRunReasonTimedOut.String(), taskRun.Name), v1beta1.TaskRunReasonTimedOut.String()); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "run-giraffe", err) } - tr, err := c.TaskRunClient.Get(taskRun.Name, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, taskRun.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving TaskRun %s: %v", taskRun.Name, err) } @@ -220,11 +227,14 @@ func TestTaskRunTimeout(t *testing.T) { } func TestPipelineTaskTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Tasks in namespace %s", namespace) task1 := &v1beta1.Task{ @@ -248,10 +258,10 @@ func TestPipelineTaskTimeout(t *testing.T) { }, } - if _, err := c.TaskClient.Create(task1); err != nil { + if _, err := c.TaskClient.Create(ctx, task1, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task1.Name, err) } - if _, err := c.TaskClient.Create(task2); err != nil { + if _, err := c.TaskClient.Create(ctx, task2, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task2.Name, err) } @@ -276,19 +286,19 @@ func TestPipelineTaskTimeout(t *testing.T) { }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to 
be started", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) } @@ -299,7 +309,7 @@ func TestPipelineTaskTimeout(t *testing.T) { for _, taskrunItem := range taskrunList.Items { go func(name string) { - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning") errChan <- err }(taskrunItem.Name) } @@ -310,12 +320,12 @@ func TestPipelineTaskTimeout(t *testing.T) { } } - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s with PipelineTask timeout in namespace %s to fail", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason(v1beta1.PipelineRunReasonFailed.String(), pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, FailedWithReason(v1beta1.PipelineRunReasonFailed.String(), pipelineRun.Name), "PipelineRunTimedOut"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) } @@ -326,7 +336,7 @@ func TestPipelineTaskTimeout(t *testing.T) { go func(tr v1beta1.TaskRun) { defer wg.Done() name := tr.Name - err := WaitForTaskRunState(c, name, func(ca apis.ConditionAccessor) (bool, error) { + err := WaitForTaskRunState(ctx, c, name, func(ca apis.ConditionAccessor) (bool, error) { cond := ca.GetCondition(apis.ConditionSucceeded) if cond != nil { if tr.Spec.TaskRef.Name == task1.Name && cond.Status == corev1.ConditionTrue { diff --git a/test/v1alpha1/artifact_bucket_test.go b/test/v1alpha1/artifact_bucket_test.go index 30a8661dbf5..ece56808197 100644 --- a/test/v1alpha1/artifact_bucket_test.go +++ b/test/v1alpha1/artifact_bucket_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "io/ioutil" "os" @@ -47,23 +48,27 @@ const ( // TestStorageBucketPipelineRun is an integration test that will verify a pipeline // can use a bucket for temporary storage of artifacts shared between tasks func TestStorageBucketPipelineRun(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + configFilePath := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") if configFilePath == "" { t.Skip("GCP_SERVICE_ACCOUNT_KEY_PATH variable is not set.") } - c, namespace := setup(t) + c, namespace := setup(ctx, t) // Bucket tests can't run in parallel without causing issues with other tests. 
- knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) bucketName := fmt.Sprintf("build-pipeline-test-%s-%d", namespace, time.Now().Unix()) t.Logf("Creating Secret %s", bucketSecretName) - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, configFilePath, namespace)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getBucketSecret(t, configFilePath, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Secret %q: %v", bucketSecretName, err) } - defer deleteBucketSecret(c, t, namespace) + defer deleteBucketSecret(ctx, c, t, namespace) t.Logf("Creating GCS bucket %s", bucketName) createbuckettask := tb.Task("createbuckettask", tb.TaskSpec( @@ -82,7 +87,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { ) t.Logf("Creating Task %s", "createbuckettask") - if _, err := c.TaskClient.Create(createbuckettask); err != nil { + if _, err := c.TaskClient.Create(ctx, createbuckettask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) } @@ -90,17 +95,17 @@ func TestStorageBucketPipelineRun(t *testing.T) { tb.TaskRunSpec(tb.TaskRunTaskRef("createbuckettask"))) t.Logf("Creating TaskRun %s", "createbuckettaskrun") - if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, createbuckettaskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "createbuckettaskrun", err) } - if err := WaitForTaskRunState(c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "createbuckettaskrun", err) } - defer runTaskToDeleteBucket(c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) + defer runTaskToDeleteBucket(ctx, c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) - originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(config.GetArtifactBucketConfigName(), metav1.GetOptions{}) + originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(ctx, config.GetArtifactBucketConfigName(), metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetArtifactBucketConfigName(), err) } @@ -112,10 +117,10 @@ func TestStorageBucketPipelineRun(t *testing.T) { config.BucketServiceAccountSecretNameKey: bucketSecretName, config.BucketServiceAccountSecretKeyKey: bucketSecretKey, } - if err := updateConfigMap(c.KubeClient, systemNamespace, config.GetArtifactBucketConfigName(), configMapData); err != nil { + if err := updateConfigMap(ctx, c.KubeClient, systemNamespace, config.GetArtifactBucketConfigName(), configMapData); err != nil { t.Fatal(err) } - defer resetConfigMap(t, c, systemNamespace, config.GetArtifactBucketConfigName(), originalConfigMapData) + defer resetConfigMap(ctx, t, c, systemNamespace, config.GetArtifactBucketConfigName(), originalConfigMapData) t.Logf("Creating Git PipelineResource %s", helloworldResourceName) helloworldResource := tb.PipelineResource(helloworldResourceName, tb.PipelineResourceSpec( @@ -124,7 +129,7 @@ func TestStorageBucketPipelineRun(t *testing.T) 
{ tb.PipelineResourceSpecParam("Revision", "master"), ), ) - if _, err := c.PipelineResourceClient.Create(helloworldResource); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, helloworldResource, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", helloworldResourceName, err) } @@ -138,7 +143,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { tb.Step("ubuntu", tb.StepName("make-executable"), tb.StepCommand("chmod"), tb.StepArgs("+x", "/workspace/helloworldgit/newfile")), )) - if _, err := c.TaskClient.Create(addFileTask); err != nil { + if _, err := c.TaskClient.Create(ctx, addFileTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) } @@ -147,7 +152,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), tb.Step("ubuntu", tb.StepName("runfile"), tb.StepCommand("/workspace/helloworld/newfile")), )) - if _, err := c.TaskClient.Create(readFileTask); err != nil { + if _, err := c.TaskClient.Create(ctx, readFileTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) } @@ -162,7 +167,7 @@ func TestStorageBucketPipelineRun(t *testing.T) { tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), ), )) - if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, bucketTestPipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) } @@ -171,12 +176,12 @@ func TestStorageBucketPipelineRun(t *testing.T) { bucketTestPipelineName, tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), )) - if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, bucketTestPipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) } // Verify status of PipelineRun (wait for it) - if err := WaitForPipelineRunState(c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { + if err := WaitForPipelineRunState(ctx, c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err) t.Fatalf("PipelineRun execution failed") } @@ -184,8 +189,8 @@ func TestStorageBucketPipelineRun(t *testing.T) { // updateConfigMap updates the config map for specified @name with values. We can't use the one from knativetest because // it assumes that Data is already a non-nil map, and by default, it isn't! 
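As the comment above notes, a ConfigMap fetched from the cluster can come back with a nil Data map, which is why the helper in the next hunk initializes it before writing values. Below is a sketch of that guard with the context-aware API; it takes a plain kubernetes.Interface rather than the knativetest client wrapper used in the patch, and the names are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// setConfigMapValues writes values into an existing ConfigMap, creating the
// Data map first if the object was stored without one.
func setConfigMapValues(ctx context.Context, client kubernetes.Interface, namespace, name string, values map[string]string) error {
	cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	for k, v := range values {
		cm.Data[k] = v
	}
	_, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}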
-func updateConfigMap(client *knativetest.KubeClient, name string, configName string, values map[string]string) error { - configMap, err := client.GetConfigMap(name).Get(configName, metav1.GetOptions{}) +func updateConfigMap(ctx context.Context, client *knativetest.KubeClient, name string, configName string, values map[string]string) error { + configMap, err := client.GetConfigMap(name).Get(ctx, configName, metav1.GetOptions{}) if err != nil { return err } @@ -198,7 +203,7 @@ func updateConfigMap(client *knativetest.KubeClient, name string, configName str configMap.Data[key] = value } - _, err = client.GetConfigMap(name).Update(configMap) + _, err = client.GetConfigMap(name).Update(ctx, configMap, metav1.UpdateOptions{}) return err } @@ -219,19 +224,19 @@ func getBucketSecret(t *testing.T, configFilePath, namespace string) *corev1.Sec } } -func deleteBucketSecret(c *clients, t *testing.T, namespace string) { - if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(bucketSecretName, &metav1.DeleteOptions{}); err != nil { +func deleteBucketSecret(ctx context.Context, c *clients, t *testing.T, namespace string) { + if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(ctx, bucketSecretName, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete Secret `%s`: %s", bucketSecretName, err) } } -func resetConfigMap(t *testing.T, c *clients, namespace, configName string, values map[string]string) { - if err := updateConfigMap(c.KubeClient, namespace, configName, values); err != nil { +func resetConfigMap(ctx context.Context, t *testing.T, c *clients, namespace, configName string, values map[string]string) { + if err := updateConfigMap(ctx, c.KubeClient, namespace, configName, values); err != nil { t.Log(err) } } -func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { +func runTaskToDeleteBucket(ctx context.Context, c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { deletelbuckettask := tb.Task("deletelbuckettask", tb.TaskSpec( tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ @@ -248,7 +253,7 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck ) t.Logf("Creating Task %s", "deletelbuckettask") - if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { + if _, err := c.TaskClient.Create(ctx, deletelbuckettask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) } @@ -256,11 +261,11 @@ func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, buck tb.TaskRunSpec(tb.TaskRunTaskRef("deletelbuckettask"))) t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") - if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, deletelbuckettaskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "deletelbuckettaskrun", err) } - if err := WaitForTaskRunState(c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "deletelbuckettaskrun", err) } } diff --git a/test/v1alpha1/build_logs.go b/test/v1alpha1/build_logs.go index 9b7eac9b0b0..02600af1c24 100644 --- a/test/v1alpha1/build_logs.go +++ 
b/test/v1alpha1/build_logs.go @@ -17,6 +17,7 @@ limitations under the License. package test import ( + "context" "fmt" "io/ioutil" "strings" @@ -28,16 +29,16 @@ import ( ) // CollectPodLogs will get the logs for all containers in a Pod -func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) { - logs, err := getContainersLogsFromPod(c.KubeClient.Kube, podName, namespace) +func CollectPodLogs(ctx context.Context, c *clients, podName, namespace string, logf logging.FormatLogger) { + logs, err := getContainersLogsFromPod(ctx, c.KubeClient.Kube, podName, namespace) if err != nil { logf("Could not get logs for pod %s: %s", podName, err) } logf("build logs %s", logs) } -func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { - p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) +func getContainersLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{}) if err != nil { return "", err } @@ -45,7 +46,7 @@ func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (st sb := strings.Builder{} for _, container := range p.Spec.Containers { sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name)) - logs, err := getContainerLogsFromPod(c, pod, container.Name, namespace) + logs, err := getContainerLogsFromPod(ctx, c, pod, container.Name, namespace) if err != nil { return "", err } @@ -54,10 +55,10 @@ func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (st return sb.String(), nil } -func getContainerLogsFromPod(c kubernetes.Interface, pod, container, namespace string) (string, error) { +func getContainerLogsFromPod(ctx context.Context, c kubernetes.Interface, pod, container, namespace string) (string, error) { sb := strings.Builder{} req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container}) - rc, err := req.Stream() + rc, err := req.Stream(ctx) if err != nil { return "", err } diff --git a/test/v1alpha1/cancel_test.go b/test/v1alpha1/cancel_test.go index 467c716a94f..39ca095855b 100644 --- a/test/v1alpha1/cancel_test.go +++ b/test/v1alpha1/cancel_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "encoding/json" "fmt" "sync" @@ -40,11 +41,14 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { // the retrying TaskRun to retry. 
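The build_logs.go hunk above is the same migration applied to log streaming: rest.Request.Stream now takes the context. A minimal sketch of reading one container's logs with the new signature follows; the pod and container names are placeholders and error handling is reduced to the essentials.

package example

import (
	"context"
	"io/ioutil"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// containerLogs returns the complete log output of one container in a pod.
func containerLogs(ctx context.Context, client kubernetes.Interface, namespace, pod, container string) (string, error) {
	req := client.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Container: container})
	rc, err := req.Stream(ctx) // Stream is context-aware after the migration
	if err != nil {
		return "", err
	}
	defer rc.Close()
	b, err := ioutil.ReadAll(rc)
	if err != nil {
		return "", err
	}
	return string(b), nil
}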
for _, numRetries := range []int{0, 1} { t.Run(fmt.Sprintf("retries=%d", numRetries), func(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) pipelineRunName := "cancel-me" pipelineRun := &v1alpha1.PipelineRun{ @@ -68,16 +72,16 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } t.Logf("Creating PipelineRun in namespace %s", namespace) - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRunName, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRunName, namespace) - if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Running(pipelineRunName), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, pipelineRunTimeout, Running(pipelineRunName), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRunName, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) } @@ -88,7 +92,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning") if err != nil { t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err) } @@ -96,7 +100,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { } wg.Wait() - pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRunName, err) } @@ -110,12 +114,12 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { if err != nil { t.Fatalf("failed to marshal patch bytes in order to cancel") } - if _, err := c.PipelineRunClient.Patch(pr.Name, types.JSONPatchType, patchBytes, ""); err != nil { + if _, err := c.PipelineRunClient.Patch(ctx, pr.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, ""); err != nil { t.Fatalf("Failed to patch PipelineRun `%s` with cancellation: %s", pipelineRunName, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", pipelineRunName, namespace) - if err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, FailedWithReason("PipelineRunCancelled", pipelineRunName), "PipelineRunCancelled"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, pipelineRunTimeout, FailedWithReason("PipelineRunCancelled", pipelineRunName), "PipelineRunCancelled"); err != nil { t.Errorf("Error waiting for PipelineRun %q to finished: %s", pipelineRunName, err) } @@ -124,7 +128,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() 
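The cancel test above stops the PipelineRun by PATCHing it with types.JSONPatchType and then waits for the PipelineRunCancelled reason; the construction of patchBytes sits outside this hunk. Below is a sketch of how such a JSON-patch body could be built, assuming the v1alpha1 convention of setting spec.status to "PipelineRunCancelled" — treat the op and value as assumptions rather than the exact patch the test encodes.

package example

import (
	"encoding/json"
)

// cancelPatch returns a JSON-patch document that sets spec.status on a
// PipelineRun. In the new client API it would be applied with
// PipelineRunClient.Patch(ctx, name, types.JSONPatchType, body, metav1.PatchOptions{}, "").
func cancelPatch() ([]byte, error) {
	patch := []map[string]interface{}{{
		"op":    "add", // assumption: "add" also overwrites an existing value, unlike "replace" on an unset field
		"path":  "/spec/status",
		"value": "PipelineRunCancelled", // assumed v1alpha1 cancel value, matching the reason waited on above
	}}
	return json.Marshal(patch)
}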
- err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled") + err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled") if err != nil { t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err) } @@ -133,7 +137,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { wg.Wait() var trName []string - taskrunList, err = c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) + taskrunList, err = c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRunName}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRunName, err) } @@ -144,7 +148,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { // Expected failure events: 1 for the pipelinerun cancel, 1 for each TaskRun expectedNumberOfEvents := 1 + len(trName) t.Logf("Making sure %d events were created from pipelinerun with kinds %v", expectedNumberOfEvents, matchKinds) - events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Failed") + events, err := collectMatchingEvents(ctx, c.KubeClient, namespace, matchKinds, "Failed") if err != nil { t.Fatalf("Failed to collect matching events: %q", err) } diff --git a/test/v1alpha1/cluster_resource_test.go b/test/v1alpha1/cluster_resource_test.go index bf5a6a9de97..57dd521665f 100644 --- a/test/v1alpha1/cluster_resource_test.go +++ b/test/v1alpha1/cluster_resource_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" tb "github.com/tektoncd/pipeline/internal/builder/v1alpha1" @@ -35,39 +36,42 @@ func TestClusterResource(t *testing.T) { taskName := "helloworld-cluster-task" taskRunName := "helloworld-cluster-taskrun" - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating secret %s", secretName) - if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getClusterResourceTaskSecret(namespace, secretName)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getClusterResourceTaskSecret(namespace, secretName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Secret `%s`: %s", secretName, err) } t.Logf("Creating configMap %s", configName) - if _, err := c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(getClusterConfigMap(namespace, configName)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(ctx, getClusterConfigMap(namespace, configName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create configMap `%s`: %s", configName, err) } t.Logf("Creating cluster PipelineResource %s", resourceName) - if _, err := c.PipelineResourceClient.Create(getClusterResource(namespace, resourceName, secretName)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getClusterResource(namespace, resourceName, secretName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create cluster Pipeline Resource `%s`: %s", resourceName, err) } t.Logf("Creating Task %s", taskName) - if _, err := c.TaskClient.Create(getClusterResourceTask(taskName, configName)); err != nil { 
+ if _, err := c.TaskClient.Create(ctx, getClusterResourceTask(taskName, configName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", taskName, err) } t.Logf("Creating TaskRun %s", taskRunName) - if _, err := c.TaskRunClient.Create(getClusterResourceTaskRun(namespace, taskRunName, taskName, resourceName)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getClusterResourceTaskRun(namespace, taskRunName, taskName, resourceName), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Taskrun `%s`: %s", taskRunName, err) } // Verify status of TaskRun (wait for it) - if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunCompleted"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunCompleted"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", taskRunName, err) } } diff --git a/test/v1alpha1/controller.go b/test/v1alpha1/controller.go index e02c848b049..c1914c1ce4c 100644 --- a/test/v1alpha1/controller.go +++ b/test/v1alpha1/controller.go @@ -36,6 +36,7 @@ import ( fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/fake" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" coreinformers "k8s.io/client-go/informers/core/v1" fakekubeclientset "k8s.io/client-go/kubernetes/fake" fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" @@ -107,7 +108,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(ctx, pr, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -115,7 +116,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -123,7 +124,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(ctx, tr, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -131,7 +132,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ctx, ta, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -139,7 +140,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + if _, err := 
c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ctx, ct, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -147,7 +148,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.PipelineResource.Informer().GetIndexer().Add(r); err != nil { t.Fatal(err) } - if _, err := c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(r); err != nil { + if _, err := c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(ctx, r, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -155,7 +156,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Condition.Informer().GetIndexer().Add(cond); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(cond); err != nil { + if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(ctx, cond, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -163,12 +164,12 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Pod.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) } - if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(p); err != nil { + if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } for _, n := range d.Namespaces { - if _, err := c.Kube.CoreV1().Namespaces().Create(n); err != nil { + if _, err := c.Kube.CoreV1().Namespaces().Create(ctx, n, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } diff --git a/test/v1alpha1/dag_test.go b/test/v1alpha1/dag_test.go index 1cdd6a51a67..134bacfd9a8 100644 --- a/test/v1alpha1/dag_test.go +++ b/test/v1alpha1/dag_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "math" "sort" "strings" @@ -45,11 +46,15 @@ import ( // | // pipeline-task-4 func TestDAGPipelineRun(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) // Create the Task that echoes text repoTaskResource := v1alpha1.TaskResource{ResourceDeclaration: v1alpha1.ResourceDeclaration{ @@ -75,7 +80,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }}, } - if _, err := c.TaskClient.Create(echoTask); err != nil { + if _, err := c.TaskClient.Create(ctx, echoTask, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create echo Task: %s", err) } @@ -90,7 +95,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineResourceClient.Create(repoResource); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, repoResource, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create simple repo PipelineResource: %s", err) } @@ -179,7 +184,7 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag-pipeline: %s", err) } pipelineRun := &v1alpha1.PipelineRun{ @@ -192,21 +197,21 @@ func TestDAGPipelineRun(t *testing.T) { }}, }, } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, 
metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag-pipeline-run PipelineRun: %s", err) } t.Logf("Waiting for DAG pipeline to complete") - if err := WaitForPipelineRunState(c, "dag-pipeline-run", pipelineRunTimeout, PipelineRunSucceed("dag-pipeline-run"), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "dag-pipeline-run", pipelineRunTimeout, PipelineRunSucceed("dag-pipeline-run"), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun to finish: %s", err) } - verifyExpectedOrder(t, c.TaskRunClient) + verifyExpectedOrder(ctx, t, c.TaskRunClient) } -func verifyExpectedOrder(t *testing.T, c clientset.TaskRunInterface) { +func verifyExpectedOrder(ctx context.Context, t *testing.T, c clientset.TaskRunInterface) { t.Logf("Verifying order of execution") - taskRunsResp, err := c.List(metav1.ListOptions{}) + taskRunsResp, err := c.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("Couldn't get TaskRuns (so that we could check when they executed): %v", err) } diff --git a/test/v1alpha1/duplicate_test.go b/test/v1alpha1/duplicate_test.go index 8bd4d3ad5fe..19be6e7e729 100644 --- a/test/v1alpha1/duplicate_test.go +++ b/test/v1alpha1/duplicate_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "sync" "testing" @@ -31,11 +32,15 @@ import ( // TestDuplicatePodTaskRun creates 10 builds and checks that each of them has only one build pod. func TestDuplicatePodTaskRun(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) var wg sync.WaitGroup for i := 0; i < 25; i++ { @@ -49,18 +54,18 @@ func TestDuplicatePodTaskRun(t *testing.T) { tb.StepArgs("simple"), )), )) - if _, err := c.TaskRunClient.Create(taskrun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskrun, metav1.CreateOptions{}); err != nil { t.Fatalf("Error creating taskrun: %v", err) } go func(t *testing.T) { defer wg.Done() - if err := WaitForTaskRunState(c, taskrunName, TaskRunSucceed(taskrunName), "TaskRunDuplicatePodTaskRunFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskrunName, TaskRunSucceed(taskrunName), "TaskRunDuplicatePodTaskRunFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) return } - pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", pipeline.GroupName+pipeline.TaskRunLabelKey, taskrunName), }) if err != nil { diff --git a/test/v1alpha1/embed_test.go b/test/v1alpha1/embed_test.go index 31ee559d17a..c2c467e1a9b 100644 --- a/test/v1alpha1/embed_test.go +++ b/test/v1alpha1/embed_test.go @@ -19,11 +19,13 @@ limitations under the License. 
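Several of the hunks above (timeout, cancel, duplicate-pod) list TaskRuns or Pods through the context-aware List with a label selector. Here is a small sketch of that call shape against a fake clientset, which also filters by label; the label key and names are illustrative stand-ins for the pipeline.GroupName+pipeline.TaskRunLabelKey constant used in the patch.

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// listPodsForTaskRun lists the pods labelled as belonging to one TaskRun.
func listPodsForTaskRun() error {
	ctx := context.Background()
	client := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo-pod",
			Namespace: "default",
			Labels:    map[string]string{"tekton.dev/taskRun": "demo-taskrun"},
		},
	})

	pods, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", "tekton.dev/taskRun", "demo-taskrun"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("found %d pod(s)\n", len(pods.Items))
	return nil
}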
package test import ( + "context" "fmt" "testing" tb "github.com/tektoncd/pipeline/internal/builder/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -38,22 +40,25 @@ const ( // TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be // executed with an embedded resource spec. func TestTaskRun_EmbeddedResource(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - if _, err := c.TaskClient.Create(getEmbeddedTask([]string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)})); err != nil { + if _, err := c.TaskClient.Create(ctx, getEmbeddedTask([]string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)}), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", embedTaskName, err) } - if _, err := c.TaskRunClient.Create(getEmbeddedTaskRun(namespace)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getEmbeddedTaskRun(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", embedTaskRunName, err) } t.Logf("Waiting for TaskRun %s in namespace %s to complete", embedTaskRunName, namespace) - if err := WaitForTaskRunState(c, embedTaskRunName, TaskRunSucceed(embedTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, embedTaskRunName, TaskRunSucceed(embedTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", embedTaskRunName, err) } diff --git a/test/v1alpha1/entrypoint_test.go b/test/v1alpha1/entrypoint_test.go index 892b212dd93..5130d418a09 100644 --- a/test/v1alpha1/entrypoint_test.go +++ b/test/v1alpha1/entrypoint_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" @@ -35,14 +36,17 @@ const epTaskRunName = "ep-task-run" // that doesn't have a cmd defined. 
In addition to making sure the steps // are executed in the order specified func TestEntrypointRunningStepsInOrder(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating TaskRun in namespace %s", namespace) - if _, err := c.TaskRunClient.Create(&v1alpha1.TaskRun{ + if _, err := c.TaskRunClient.Create(ctx, &v1alpha1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Name: epTaskRunName, Namespace: namespace}, Spec: v1alpha1.TaskRunSpec{ TaskSpec: &v1alpha1.TaskSpec{TaskSpec: v1beta1.TaskSpec{ @@ -55,12 +59,12 @@ func TestEntrypointRunningStepsInOrder(t *testing.T) { }}, }}, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, epTaskRunName, TaskRunSucceed(epTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, epTaskRunName, TaskRunSucceed(epTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } diff --git a/test/v1alpha1/git_checkout_test.go b/test/v1alpha1/git_checkout_test.go index b4093d1194e..13952dddf97 100644 --- a/test/v1alpha1/git_checkout_test.go +++ b/test/v1alpha1/git_checkout_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "strings" "testing" @@ -95,12 +96,15 @@ func TestGitPipelineRun(t *testing.T) { }} { t.Run(tc.name, func(t *testing.T) { t.Parallel() - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) - if _, err := c.PipelineResourceClient.Create(&v1alpha1.PipelineResource{ + if _, err := c.PipelineResourceClient.Create(ctx, &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{Name: gitSourceResourceName}, Spec: v1alpha1.PipelineResourceSpec{ Type: v1alpha1.PipelineResourceTypeGit, @@ -111,12 +115,12 @@ func TestGitPipelineRun(t *testing.T) { {Name: "sslVerify", Value: tc.sslVerify}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) } t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) - if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1alpha1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineRunName}, Spec: v1alpha1.PipelineRunSpec{ Resources: []v1alpha1.PipelineResourceBinding{{ @@ -149,11 +153,11 @@ func TestGitPipelineRun(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %s", gitTestPipelineRunName, err) } - if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != 
nil { + if err := WaitForPipelineRunState(ctx, c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) t.Fatalf("PipelineRun execution failed") } @@ -178,12 +182,15 @@ func TestGitPipelineRunFail(t *testing.T) { }} { t.Run(tc.name, func(t *testing.T) { t.Parallel() - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) - if _, err := c.PipelineResourceClient.Create(&v1alpha1.PipelineResource{ + if _, err := c.PipelineResourceClient.Create(ctx, &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{Name: gitSourceResourceName}, Spec: v1alpha1.PipelineResourceSpec{ Type: v1alpha1.PipelineResourceTypeGit, @@ -193,12 +200,12 @@ func TestGitPipelineRunFail(t *testing.T) { {Name: "httpsProxy", Value: tc.httpsproxy}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) } t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) - if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1alpha1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineRunName}, Spec: v1alpha1.PipelineRunSpec{ Resources: []v1alpha1.PipelineResourceBinding{{ @@ -231,18 +238,18 @@ func TestGitPipelineRunFail(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %s", gitTestPipelineRunName, err) } - if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { - taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err := WaitForPipelineRunState(ctx, c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + taskruns, err := c.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error getting TaskRun list for PipelineRun %s %s", gitTestPipelineRunName, err) } for _, tr := range taskruns.Items { if tr.Status.PodName != "" { - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } @@ -251,7 +258,7 @@ func TestGitPipelineRunFail(t *testing.T) { if strings.HasPrefix(stat.Name, "step-git-source-"+gitSourceResourceName) { if stat.State.Terminated != nil { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } diff --git a/test/v1alpha1/init_test.go b/test/v1alpha1/init_test.go index 4420c1626df..af6f059116e 100644 --- a/test/v1alpha1/init_test.go +++ 
b/test/v1alpha1/init_test.go @@ -21,6 +21,7 @@ limitations under the License. package test import ( + "context" "flag" "fmt" "os" @@ -50,18 +51,18 @@ func init() { flag.BoolVar(&skipRootUserTests, "skipRootUserTests", false, "Skip tests that require root user") } -func setup(t *testing.T, fn ...func(*testing.T, *clients, string)) (*clients, string) { +func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) { t.Helper() namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle") initializeLogsAndMetrics(t) c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) - createNamespace(t, namespace, c.KubeClient) - verifyServiceAccountExistence(t, namespace, c.KubeClient) + createNamespace(ctx, t, namespace, c.KubeClient) + verifyServiceAccountExistence(ctx, t, namespace, c.KubeClient) for _, f := range fn { - f(t, c, namespace) + f(ctx, t, c, namespace) } return c, namespace @@ -77,34 +78,34 @@ func header(logf logging.FormatLogger, text string) { logf(bar) } -func tearDown(t *testing.T, cs *clients, namespace string) { +func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) { t.Helper() if cs.KubeClient == nil { return } if t.Failed() { header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace)) - bs, err := getCRDYaml(cs, namespace) + bs, err := getCRDYaml(ctx, cs, namespace) if err != nil { t.Error(err) } else { t.Log(string(bs)) } header(t.Logf, fmt.Sprintf("Dumping logs from Pods in the %s", namespace)) - taskruns, err := cs.TaskRunClient.List(metav1.ListOptions{}) + taskruns, err := cs.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error getting TaskRun list %s", err) } for _, tr := range taskruns.Items { if tr.Status.PodName != "" { - CollectPodLogs(cs, tr.Status.PodName, namespace, t.Logf) + CollectPodLogs(ctx, cs, tr.Status.PodName, namespace, t.Logf) } } } if os.Getenv("TEST_KEEP_NAMESPACES") == "" { t.Logf("Deleting namespace %s", namespace) - if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { + if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil { t.Errorf("Failed to delete namespace %s: %s", namespace, err) } } @@ -122,27 +123,27 @@ func initializeLogsAndMetrics(t *testing.T) { }) } -func createNamespace(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { +func createNamespace(ctx context.Context, t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { t.Logf("Create namespace %s to deploy to", namespace) labels := map[string]string{ "tekton.dev/test-e2e": "true", } - if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(&corev1.Namespace{ + if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, Labels: labels, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err) } } -func verifyServiceAccountExistence(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { +func verifyServiceAccountExistence(ctx context.Context, t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { defaultSA := "default" t.Logf("Verify SA %q is created in namespace %q", defaultSA, namespace) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := 
kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(defaultSA, metav1.GetOptions{}) + _, err := kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(ctx, defaultSA, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return false, nil } @@ -161,7 +162,7 @@ func TestMain(m *testing.M) { os.Exit(c) } -func getCRDYaml(cs *clients, ns string) ([]byte, error) { +func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) { var output []byte printOrAdd := func(i interface{}) { bs, err := yaml.Marshal(i) @@ -172,7 +173,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { output = append(output, bs...) } - ps, err := cs.PipelineClient.List(metav1.ListOptions{}) + ps, err := cs.PipelineClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipeline: %w", err) } @@ -180,7 +181,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - prs, err := cs.PipelineResourceClient.List(metav1.ListOptions{}) + prs, err := cs.PipelineResourceClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipelinerun resource: %w", err) } @@ -188,7 +189,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - prrs, err := cs.PipelineRunClient.List(metav1.ListOptions{}) + prrs, err := cs.PipelineRunClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pipelinerun: %w", err) } @@ -196,14 +197,14 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - ts, err := cs.TaskClient.List(metav1.ListOptions{}) + ts, err := cs.TaskClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get tasks: %w", err) } for _, i := range ts.Items { printOrAdd(i) } - trs, err := cs.TaskRunClient.List(metav1.ListOptions{}) + trs, err := cs.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get taskrun: %w", err) } @@ -211,7 +212,7 @@ func getCRDYaml(cs *clients, ns string) ([]byte, error) { printOrAdd(i) } - pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("could not get pods: %w", err) } diff --git a/test/v1alpha1/kaniko_task_test.go b/test/v1alpha1/kaniko_task_test.go index 06f0fce293e..e17594287ae 100644 --- a/test/v1alpha1/kaniko_task_test.go +++ b/test/v1alpha1/kaniko_task_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "fmt" "strings" "testing" @@ -47,41 +48,44 @@ func TestKanikoTaskRun(t *testing.T) { t.Skip("Skip test as skipRootUserTests set to true") } - c, namespace := setup(t, withRegistry) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t, withRegistry) t.Parallel() repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Git PipelineResource %s", kanikoGitResourceName) - if _, err := c.PipelineResourceClient.Create(getGitResource(namespace)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getGitResource(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } t.Logf("Creating Image PipelineResource %s", repo) - if _, err := c.PipelineResourceClient.Create(getImageResource(namespace, repo)); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, getImageResource(namespace, repo), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } t.Logf("Creating Task %s", kanikoTaskName) - if _, err := c.TaskClient.Create(getTask(repo, namespace)); err != nil { + if _, err := c.TaskClient.Create(ctx, getTask(repo, namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", kanikoTaskName, err) } t.Logf("Creating TaskRun %s", kanikoTaskRunName) - if _, err := c.TaskRunClient.Create(getTaskRun(namespace)); err != nil { + if _, err := c.TaskRunClient.Create(ctx, getTaskRun(namespace), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", kanikoTaskRunName, err) } // Verify status of TaskRun (wait for it) - if err := WaitForTaskRunState(c, kanikoTaskRunName, Succeed(kanikoTaskRunName), "TaskRunCompleted"); err != nil { + if err := WaitForTaskRunState(ctx, c, kanikoTaskRunName, Succeed(kanikoTaskRunName), "TaskRunCompleted"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", kanikoTaskRunName, err) } - tr, err := c.TaskRunClient.Get(kanikoTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, kanikoTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } @@ -114,7 +118,7 @@ func TestKanikoTaskRun(t *testing.T) { } // match the local digest, which is first capture group against the remote image - remoteDigest, err := getRemoteDigest(t, c, namespace, repo) + remoteDigest, err := getRemoteDigest(ctx, t, c, namespace, repo) if err != nil { t.Fatalf("Expected to get digest for remote image %s: %v", repo, err) } @@ -181,10 +185,10 @@ func getTaskRun(namespace string) *v1alpha1.TaskRun { // to the "outside" of the test, this means it can be query by the test itself. It can only be query from // a pod in the namespace. skopeo is able to do that query and we use jq to extract the digest from its // output. The image used for this pod is build in the tektoncd/plumbing repository. 
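Every migrated test in this diff, including the Kaniko test above, opens with the same preamble: build a cancellable context, pass it to setup, and hand the same context to the deferred tearDown so cleanup can still talk to the API server. A minimal sketch of that shape, reusing the helpers whose signatures change in this diff (TestSomething is an illustrative name; the imports are the ones added here: "context", metav1, knativetest):

func TestSomething(t *testing.T) {
    // Create a test-scoped context that is cancelled when the test returns.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // setup provisions a namespace and clients; tearDown dumps state and deletes the namespace.
    c, namespace := setup(ctx, t)
    knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
    defer tearDown(ctx, t, c, namespace)

    // All client-go calls now take the context plus an explicit options struct.
    if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(ctx, "default", metav1.GetOptions{}); err != nil {
        t.Fatalf("Failed to get default service account: %s", err)
    }
}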
-func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, error) { +func getRemoteDigest(ctx context.Context, t *testing.T, c *clients, namespace, image string) (string, error) { t.Helper() podName := "skopeo-jq" - if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(&corev1.Pod{ + if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(ctx, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: podName, @@ -198,15 +202,15 @@ func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, }}, RestartPolicy: corev1.RestartPolicyNever, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the skopeo-jq pod: %v", err) } - if err := WaitForPodState(c, podName, namespace, func(pod *corev1.Pod) (bool, error) { + if err := WaitForPodState(ctx, c, podName, namespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == "Succeeded" || pod.Status.Phase == "Failed", nil }, "PodContainersTerminated"); err != nil { t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err) } - logs, err := getContainerLogsFromPod(c.KubeClient.Kube, podName, "skopeo", namespace) + logs, err := getContainerLogsFromPod(ctx, c.KubeClient.Kube, podName, "skopeo", namespace) if err != nil { t.Fatalf("Could not get logs for pod %s: %s", podName, err) } diff --git a/test/v1alpha1/pipelinerun_test.go b/test/v1alpha1/pipelinerun_test.go index 02bd1fbf4b1..3eee0dbaf08 100644 --- a/test/v1alpha1/pipelinerun_test.go +++ b/test/v1alpha1/pipelinerun_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "encoding/base64" "fmt" "strings" @@ -52,7 +53,7 @@ func TestPipelineRun(t *testing.T) { t.Parallel() type tests struct { name string - testSetup func(t *testing.T, c *clients, namespace string, index int) + testSetup func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) expectedTaskRuns []string expectedNumberOfEvents int pipelineRunFunc func(int, string) *v1alpha1.PipelineRun @@ -60,21 +61,21 @@ func TestPipelineRun(t *testing.T) { tds := []tests{{ name: "fan-in and fan-out", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() for _, task := range getFanInFanOutTasks() { - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } } for _, res := range getFanInFanOutGitResources() { - if _, err := c.PipelineResourceClient.Create(res); err != nil { + if _, err := c.PipelineResourceClient.Create(ctx, res, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) } } - if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getFanInFanOutPipeline(index), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -84,13 +85,13 @@ func TestPipelineRun(t *testing.T) { expectedNumberOfEvents: 5, }, { name: "service account propagation and pipeline param", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() - if _, err := 
c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(ctx, getPipelineRunSecret(index), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err) } - if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index)); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(ctx, getPipelineRunServiceAccount(index), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) } @@ -103,11 +104,11 @@ func TestPipelineRun(t *testing.T) { tb.StepArgs("copy", "$(inputs.params.path)", "$(inputs.params.dest)"), ), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } - if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(index), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -117,10 +118,10 @@ func TestPipelineRun(t *testing.T) { pipelineRunFunc: getHelloWorldPipelineRun, }, { name: "pipeline succeeds when task skipped due to failed condition", - testSetup: func(t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { t.Helper() cond := getFailingCondition(namespace) - if _, err := c.ConditionClient.Create(cond); err != nil { + if _, err := c.ConditionClient.Create(ctx, cond, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) } @@ -130,10 +131,10 @@ func TestPipelineRun(t *testing.T) { tb.StepArgs("-c", "echo hello, world"), ), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } - if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index)); err != nil { + if _, err := c.PipelineClient.Create(ctx, getPipelineWithFailingCondition(index), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) } }, @@ -147,27 +148,30 @@ func TestPipelineRun(t *testing.T) { t.Run(td.name, func(t *testing.T) { td := td t.Parallel() - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) - td.testSetup(t, c, namespace, i) + td.testSetup(ctx, t, c, namespace, i) prName := fmt.Sprintf("%s%d", pipelineRunName, i) - pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace)) + pipelineRun, err := c.PipelineRunClient.Create(ctx, td.pipelineRunFunc(i, namespace), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) } 
t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace) - if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns) - actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) + actualTaskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) } @@ -181,7 +185,7 @@ func TestPipelineRun(t *testing.T) { } } expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) - r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + r, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } @@ -190,16 +194,16 @@ func TestPipelineRun(t *testing.T) { } t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name) - checkLabelPropagation(t, c, namespace, prName, r) + checkLabelPropagation(ctx, t, c, namespace, prName, r) t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name) - checkAnnotationPropagation(t, c, namespace, prName, r) + checkAnnotationPropagation(ctx, t, c, namespace, prName, r) } matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames} t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, matchKinds) - events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded") + events, err := collectMatchingEvents(ctx, c.KubeClient, namespace, matchKinds, "Succeeded") if err != nil { t.Fatalf("Failed to collect matching events: %q", err) } @@ -218,7 +222,7 @@ func TestPipelineRun(t *testing.T) { // the PersistentVolumeClaims has the DeletionTimestamp if err := wait.PollImmediate(interval, timeout, func() (bool, error) { // Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run. - pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) + pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) if errWait != nil && !errors.IsNotFound(errWait) { return true, fmt.Errorf("error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait) } @@ -391,10 +395,10 @@ func getName(namespace string, suffix int) string { // collectMatchingEvents collects list of events under 5 seconds that match // 1. matchKinds which is a map of Kind of Object with name of objects // 2. 
reason which is the expected reason of event -func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { +func collectMatchingEvents(ctx context.Context, kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { var events []*corev1.Event - watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{}) + watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{}) // close watchEvents channel defer watchEvents.Stop() if err != nil { @@ -423,17 +427,17 @@ func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, // checkLabelPropagation checks that labels are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkLabelPropagation(ctx context.Context, t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { // Our controllers add 4 labels automatically. If custom labels are set on // the Pipeline, PipelineRun, or Task then the map will have to be resized. labels := make(map[string]string, 4) // Check label propagation to PipelineRuns. - pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) } - p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + p, err := c.PipelineClient.Get(ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) } @@ -451,7 +455,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // This label is added to every TaskRun by the PipelineRun controller labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + task, err := c.TaskClient.Get(ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) } @@ -467,7 +471,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // This label is added to every Pod by the TaskRun controller if tr.Status.PodName != "" { // Check label propagation to Pods. - pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + pod := getPodForTaskRun(ctx, t, c.KubeClient, namespace, tr) // This label is added to every Pod by the TaskRun controller labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name assertLabelsMatch(t, labels, pod.ObjectMeta.Labels) @@ -476,15 +480,15 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // checkAnnotationPropagation checks that annotations are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkAnnotationPropagation(ctx context.Context, t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { annotations := make(map[string]string) // Check annotation propagation to PipelineRuns. 
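collectMatchingEvents above now opens its watch with the context-aware Watch call. A simplified, hypothetical sketch of that pattern (collectEvents is an illustrative name; the real helper also filters events by kind, object name, and reason before appending):

func collectEvents(ctx context.Context, kube kubernetes.Interface, namespace string) ([]*corev1.Event, error) {
    watcher, err := kube.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }
    defer watcher.Stop()

    // Collect events for up to five seconds, as the comment above describes.
    var events []*corev1.Event
    timer := time.After(5 * time.Second)
    for {
        select {
        case wevent, ok := <-watcher.ResultChan():
            if !ok {
                return events, nil
            }
            if ev, isEvent := wevent.Object.(*corev1.Event); isEvent {
                events = append(events, ev)
            }
        case <-timer:
            return events, nil
        }
    }
}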
- pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) } - p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + p, err := c.PipelineClient.Get(ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) } @@ -498,7 +502,7 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe annotations[key] = val } if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + task, err := c.TaskClient.Get(ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) } @@ -509,13 +513,13 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations) // Check annotation propagation to Pods. - pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + pod := getPodForTaskRun(ctx, t, c.KubeClient, namespace, tr) assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) } -func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { +func getPodForTaskRun(ctx context.Context, t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { // The Pod name has a random suffix, so we filter by label to find the one we care about. - pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, }) if err != nil { diff --git a/test/v1alpha1/registry_test.go b/test/v1alpha1/registry_test.go index 32654b62c4f..6f1b18b597a 100644 --- a/test/v1alpha1/registry_test.go +++ b/test/v1alpha1/registry_test.go @@ -18,6 +18,7 @@ limitations under the License. 
package test import ( + "context" "testing" appsv1 "k8s.io/api/apps/v1" @@ -25,12 +26,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func withRegistry(t *testing.T, c *clients, namespace string) { +func withRegistry(ctx context.Context, t *testing.T, c *clients, namespace string) { deployment := getRegistryDeployment(namespace) - if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(deployment); err != nil { + if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the local registry deployment: %v", err) } - if err := WaitForDeploymentState(c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { + if err := WaitForDeploymentState(ctx, c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { var replicas int32 = 1 if d.Spec.Replicas != nil { replicas = *d.Spec.Replicas @@ -41,7 +42,7 @@ func withRegistry(t *testing.T, c *clients, namespace string) { } service := getRegistryService(namespace) - if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(service); err != nil { + if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create the local registry service: %v", err) } } diff --git a/test/v1alpha1/retry_test.go b/test/v1alpha1/retry_test.go index ab7add10491..3f04ef63376 100644 --- a/test/v1alpha1/retry_test.go +++ b/test/v1alpha1/retry_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "testing" "time" @@ -34,15 +35,18 @@ import ( // TestTaskRunRetry tests that retries behave as expected, by creating multiple // Pods for the same TaskRun each time it fails, up to the configured max. func TestTaskRunRetry(t *testing.T) { - c, namespace := setup(t) - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) // Create a PipelineRun with a single TaskRun that can only fail, // configured to retry 5 times. pipelineRunName := "retry-pipeline" numRetries := 5 - if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(ctx, &v1alpha1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, Spec: v1alpha1.PipelineRunSpec{ PipelineSpec: &v1alpha1.PipelineSpec{ @@ -58,17 +62,17 @@ func TestTaskRunRetry(t *testing.T) { }}, }, }, - }); err != nil { + }, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err) } // Wait for the PipelineRun to fail, when retries are exhausted. - if err := WaitForPipelineRunState(c, pipelineRunName, 5*time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, 5*time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil { t.Fatalf("Waiting for PipelineRun to fail: %v", err) } // Get the status of the PipelineRun. 
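The registry helper (withRegistry) shown above follows the usual create-then-wait shape with the migrated signatures. A condensed sketch, with the Deployment and Service specs passed in and the helper name purely illustrative:

func withLocalRegistry(ctx context.Context, t *testing.T, kube kubernetes.Interface, namespace string, d *appsv1.Deployment, svc *corev1.Service) {
    t.Helper()
    if _, err := kube.AppsV1().Deployments(namespace).Create(ctx, d, metav1.CreateOptions{}); err != nil {
        t.Fatalf("Failed to create registry deployment: %v", err)
    }
    // Poll until the deployment reports all replicas ready.
    if err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
        got, err := kube.AppsV1().Deployments(namespace).Get(ctx, d.Name, metav1.GetOptions{})
        if err != nil {
            return true, err
        }
        want := int32(1)
        if d.Spec.Replicas != nil {
            want = *d.Spec.Replicas
        }
        return got.Status.ReadyReplicas == want, nil
    }); err != nil {
        t.Fatalf("Registry deployment never became ready: %v", err)
    }
    if _, err := kube.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
        t.Fatalf("Failed to create registry service: %v", err)
    }
}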
- pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + pr, err := c.PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err) } @@ -84,7 +88,7 @@ func TestTaskRunRetry(t *testing.T) { } // There should only be one TaskRun created. - trs, err := c.TaskRunClient.List(metav1.ListOptions{}) + trs, err := c.TaskRunClient.List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Failed to list TaskRuns: %v", err) } else if len(trs.Items) != 1 { @@ -106,7 +110,7 @@ func TestTaskRunRetry(t *testing.T) { } // There should be N Pods created, all failed, all owned by the TaskRun. - pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) // We expect N+1 Pods total, one for each failed and retried attempt, and one for the final attempt. wantPods := numRetries + 1 diff --git a/test/v1alpha1/secret.go b/test/v1alpha1/secret.go index 37ea00c080a..83391295b36 100644 --- a/test/v1alpha1/secret.go +++ b/test/v1alpha1/secret.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "io/ioutil" "os" @@ -35,6 +36,10 @@ import ( // otherwise. func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, namespace string, secretName string) (bool, error) { t.Helper() + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + file := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") if file == "" { t.Logf("Not creating service account secret, relying on default credentials in namespace %s.", namespace) @@ -56,7 +61,7 @@ func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, name sec.Data = map[string][]byte{ "config.json": bs, } - _, err = c.Kube.CoreV1().Secrets(namespace).Create(sec) + _, err = c.Kube.CoreV1().Secrets(namespace).Create(ctx, sec, metav1.CreateOptions{}) t.Log("Creating service account secret") return true, err diff --git a/test/v1alpha1/sidecar_test.go b/test/v1alpha1/sidecar_test.go index 4964f65469f..ac4f4e5f4e5 100644 --- a/test/v1alpha1/sidecar_test.go +++ b/test/v1alpha1/sidecar_test.go @@ -19,6 +19,7 @@ limitations under the License. 
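TestTaskRunRetry above lists the namespace's pods with the context-aware List and expects numRetries+1 of them, all owned by the single TaskRun. A hypothetical helper sketching that ownership count (podsOwnedByTaskRun is an illustrative name; the real test's filtering may differ):

func podsOwnedByTaskRun(ctx context.Context, kube kubernetes.Interface, namespace, taskRunName string) (int, error) {
    pods, err := kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return 0, err
    }
    // Count pods whose owner references point back at the TaskRun.
    count := 0
    for _, p := range pods.Items {
        for _, ref := range p.OwnerReferences {
            if ref.Kind == "TaskRun" && ref.Name == taskRunName {
                count++
            }
        }
    }
    return count, nil
}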
package test import ( + "context" "fmt" "testing" "time" @@ -55,11 +56,14 @@ func TestSidecarTaskSupport(t *testing.T) { sidecarCommand: []string{"echo", "\"hello from sidecar\""}, }} - clients, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + clients, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, clients, namespace) }, t.Logf) - defer tearDown(t, clients, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, clients, namespace) }, t.Logf) + defer tearDown(ctx, t, clients, namespace) for i, test := range tests { t.Run(test.desc, func(t *testing.T) { @@ -92,26 +96,26 @@ func TestSidecarTaskSupport(t *testing.T) { } t.Logf("Creating Task %q", sidecarTaskName) - if _, err := clients.TaskClient.Create(task); err != nil { + if _, err := clients.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create Task %q: %v", sidecarTaskName, err) } t.Logf("Creating TaskRun %q", sidecarTaskRunName) - if _, err := clients.TaskRunClient.Create(taskRun); err != nil { + if _, err := clients.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create TaskRun %q: %v", sidecarTaskRunName, err) } - if err := WaitForTaskRunState(clients, sidecarTaskRunName, Succeed(sidecarTaskRunName), "TaskRunSucceed"); err != nil { + if err := WaitForTaskRunState(ctx, clients, sidecarTaskRunName, Succeed(sidecarTaskRunName), "TaskRunSucceed"); err != nil { t.Errorf("Error waiting for TaskRun %q to finish: %v", sidecarTaskRunName, err) } - tr, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + tr, err := clients.TaskRunClient.Get(ctx, sidecarTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting Taskrun: %v", err) } podName := tr.Status.PodName - if err := WaitForPodState(clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { + if err := WaitForPodState(ctx, clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { terminatedCount := 0 for _, c := range pod.Status.ContainerStatuses { if c.State.Terminated != nil { @@ -123,7 +127,7 @@ func TestSidecarTaskSupport(t *testing.T) { t.Errorf("Error waiting for Pod %q to terminate both the primary and sidecar containers: %v", podName, err) } - pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting TaskRun pod: %v", err) } @@ -152,7 +156,7 @@ func TestSidecarTaskSupport(t *testing.T) { t.Errorf("Either the primary or sidecar containers did not terminate") } - trCheckSidecarStatus, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + trCheckSidecarStatus, err := clients.TaskRunClient.Get(ctx, sidecarTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting TaskRun: %v", err) } diff --git a/test/v1alpha1/start_time_test.go b/test/v1alpha1/start_time_test.go index 8dd8e742a70..8630fe8fe88 100644 --- a/test/v1alpha1/start_time_test.go +++ b/test/v1alpha1/start_time_test.go @@ -15,6 +15,7 @@ limitations under the License. package test import ( + "context" "testing" "time" @@ -32,12 +33,16 @@ import ( // Scheduling and reporting specifics can result in start times being reported // more than 10s apart, but they shouldn't be less than 10s apart. 
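The sidecar test above fetches the TaskRun's pod with the migrated Get and then checks container termination. A small sketch of that check as a hypothetical helper (assertContainersTerminated is an illustrative name, assuming a kubernetes.Interface client):

func assertContainersTerminated(ctx context.Context, t *testing.T, kube kubernetes.Interface, namespace, podName string, want int) {
    t.Helper()
    pod, err := kube.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Error getting pod %q: %v", podName, err)
    }
    // Count containers that have reached the Terminated state.
    terminated := 0
    for _, cs := range pod.Status.ContainerStatuses {
        if cs.State.Terminated != nil {
            terminated++
        }
    }
    if terminated < want {
        t.Errorf("expected at least %d terminated containers in pod %q, got %d", want, podName, terminated)
    }
}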
func TestStartTime(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating TaskRun in namespace %q", namespace) - tr, err := c.TaskRunClient.Create(&v1alpha1.TaskRun{ + tr, err := c.TaskRunClient.Create(ctx, &v1alpha1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "start-time-test-", Namespace: namespace, @@ -62,16 +67,16 @@ func TestStartTime(t *testing.T) { }}, }}, }, - }) + }, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating TaskRun: %v", err) } t.Logf("Created TaskRun %q in namespace %q", tr.Name, namespace) // Wait for the TaskRun to complete. - if err := WaitForTaskRunState(c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } - tr, err = c.TaskRunClient.Get(tr.Name, metav1.GetOptions{}) + tr, err = c.TaskRunClient.Get(ctx, tr.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting TaskRun: %v", err) } diff --git a/test/v1alpha1/status_test.go b/test/v1alpha1/status_test.go index 58fcc8f2e0b..bba40b78377 100644 --- a/test/v1alpha1/status_test.go +++ b/test/v1alpha1/status_test.go @@ -19,9 +19,11 @@ limitations under the License. package test import ( + "context" "testing" tb "github.com/tektoncd/pipeline/internal/builder/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -29,28 +31,31 @@ import ( // verify a very simple "hello world" TaskRun and PipelineRun failure // execution lead to the correct TaskRun status. 
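Per the comment above, TestStartTime asserts that consecutive step start times are at least 10s apart once the TaskRun is fetched back with the context-aware Get. A sketch of that assertion under those assumptions (assertStepStartTimes is an illustrative name; the elided test body may phrase the check differently):

func assertStepStartTimes(t *testing.T, tr *v1alpha1.TaskRun, minGap time.Duration) {
    t.Helper()
    var last time.Time
    for _, step := range tr.Status.Steps {
        if step.Terminated == nil {
            t.Errorf("step %q has not terminated", step.Name)
            continue
        }
        // Each terminated step records when its container started.
        started := step.Terminated.StartedAt.Time
        if !last.IsZero() && started.Sub(last) < minGap {
            t.Errorf("step %q started %v after the previous step, want at least %v", step.Name, started.Sub(last), minGap)
        }
        last = started
    }
}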
func TestTaskRunPipelineRunStatus(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) task := tb.Task("banana", tb.TaskSpec( tb.Step("busybox", tb.StepCommand("ls", "-la")), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := tb.TaskRun("apple", tb.TaskRunSpec( tb.TaskRunTaskRef("banana"), tb.TaskRunServiceAccountName("inexistent"), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } @@ -60,15 +65,15 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { pipelineRun := tb.PipelineRun("pear", tb.PipelineRunSpec( "tomatoes", tb.PipelineRunServiceAccountName("inexistent"), )) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) } t.Logf("Waiting for PipelineRun in namespace %s to fail", namespace) - if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil { + if err := WaitForPipelineRunState(ctx, c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } } diff --git a/test/v1alpha1/taskrun_test.go b/test/v1alpha1/taskrun_test.go index 11c73d18587..1dfbbcb90ef 100644 --- a/test/v1alpha1/taskrun_test.go +++ b/test/v1alpha1/taskrun_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "strings" "testing" @@ -32,11 +33,14 @@ import ( ) func TestTaskRunFailure(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) taskRunName := "failing-taskrun" @@ -52,22 +56,22 @@ func TestTaskRunFailure(t *testing.T) { tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 30s"), ), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := tb.TaskRun(taskRunName, tb.TaskRunSpec( tb.TaskRunTaskRef("failing-task"), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } - taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } @@ -108,11 +112,14 @@ func TestTaskRunFailure(t *testing.T) { } func TestTaskRunStatus(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) taskRunName := "status-taskrun" @@ -124,22 +131,22 @@ func TestTaskRunStatus(t *testing.T) { tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), ), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } taskRun := tb.TaskRun(taskRunName, tb.TaskRunSpec( tb.TaskRunTaskRef("status-task"), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } - taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } diff --git a/test/v1alpha1/timeout_test.go b/test/v1alpha1/timeout_test.go index d9ac9a15236..2cdbbd9c9b0 100644 --- 
a/test/v1alpha1/timeout_test.go +++ b/test/v1alpha1/timeout_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "fmt" "sync" "testing" @@ -36,16 +37,19 @@ import ( // verify that pipelinerun timeout works and leads to the the correct TaskRun statuses // and pod deletions. func TestPipelineRunTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task in namespace %s", namespace) task := tb.Task("banana", tb.TaskSpec( tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 10")))) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "banana", err) } @@ -55,19 +59,19 @@ func TestPipelineRunTimeout(t *testing.T) { pipelineRun := tb.PipelineRun("pear", tb.PipelineRunSpec(pipeline.Name, tb.PipelineRunTimeout(5*time.Second), )) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) } @@ -78,7 +82,7 @@ func TestPipelineRunTimeout(t *testing.T) { for _, taskrunItem := range taskrunList.Items { go func(name string) { - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning") errChan <- err }(taskrunItem.Name) } @@ -89,12 +93,12 @@ func TestPipelineRunTimeout(t *testing.T) { } } - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason("PipelineRunTimeout", pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + if err := WaitForPipelineRunState(ctx, c, 
pipelineRun.Name, timeout, FailedWithReason("PipelineRunTimeout", pipelineRun.Name), "PipelineRunTimedOut"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) } @@ -104,7 +108,7 @@ func TestPipelineRunTimeout(t *testing.T) { wg.Add(1) go func(name string) { defer wg.Done() - err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunTimeout", name), "TaskRunTimeout") + err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunTimeout", name), "TaskRunTimeout") if err != nil { t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) } @@ -112,7 +116,7 @@ func TestPipelineRunTimeout(t *testing.T) { } wg.Wait() - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } @@ -121,51 +125,57 @@ func TestPipelineRunTimeout(t *testing.T) { secondPipeline := tb.Pipeline("peppers", tb.PipelineSpec(tb.PipelineTask("foo", "banana"))) secondPipelineRun := tb.PipelineRun("kiwi", tb.PipelineRunSpec("peppers")) - if _, err := c.PipelineClient.Create(secondPipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, secondPipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", secondPipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(secondPipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, secondPipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", secondPipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s in namespace %s to complete", secondPipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, secondPipelineRun.Name, timeout, PipelineRunSucceed(secondPipelineRun.Name), "PipelineRunSuccess"); err != nil { + if err := WaitForPipelineRunState(ctx, c, secondPipelineRun.Name, timeout, PipelineRunSucceed(secondPipelineRun.Name), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", secondPipelineRun.Name, err) } } // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out. func TestTaskRunTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - if _, err := c.TaskClient.Create(tb.Task("giraffe", - tb.TaskSpec(tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 3000"))))); err != nil { + if _, err := c.TaskClient.Create(ctx, tb.Task("giraffe", + tb.TaskSpec(tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 3000")))), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "giraffe", err) } - if _, err := c.TaskRunClient.Create(tb.TaskRun("run-giraffe", tb.TaskRunSpec(tb.TaskRunTaskRef("giraffe"), + if _, err := c.TaskRunClient.Create(ctx, tb.TaskRun("run-giraffe", tb.TaskRunSpec(tb.TaskRunTaskRef("giraffe"), // Do not reduce this timeout. 
Taskrun e2e test is also verifying // if reconcile is triggered from timeout handler and not by pod informers - tb.TaskRunTimeout(30*time.Second)))); err != nil { + tb.TaskRunTimeout(30*time.Second))), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "run-giraffe", err) } t.Logf("Waiting for TaskRun %s in namespace %s to complete", "run-giraffe", namespace) - if err := WaitForTaskRunState(c, "run-giraffe", FailedWithReason("TaskRunTimeout", "run-giraffe"), "TaskRunTimeout"); err != nil { + if err := WaitForTaskRunState(ctx, c, "run-giraffe", FailedWithReason("TaskRunTimeout", "run-giraffe"), "TaskRunTimeout"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", "run-giraffe", err) } } func TestPipelineTaskTimeout(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) t.Logf("Creating Tasks in namespace %s", namespace) task1 := tb.Task("success", tb.TaskSpec( @@ -174,10 +184,10 @@ func TestPipelineTaskTimeout(t *testing.T) { task2 := tb.Task("timeout", tb.TaskSpec( tb.Step("busybox", tb.StepCommand("sleep"), tb.StepArgs("10s")))) - if _, err := c.TaskClient.Create(task1); err != nil { + if _, err := c.TaskClient.Create(ctx, task1, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task1.Name, err) } - if _, err := c.TaskClient.Create(task2); err != nil { + if _, err := c.TaskClient.Create(ctx, task2, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task2.Name, err) } @@ -190,19 +200,19 @@ func TestPipelineTaskTimeout(t *testing.T) { pipelineRun := tb.PipelineRun("prtasktimeout", tb.PipelineRunSpec(pipeline.Name)) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) } - taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) if err != nil { t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) } @@ -213,7 +223,7 @@ func TestPipelineTaskTimeout(t *testing.T) { for _, taskrunItem := range taskrunList.Items { go func(name string) { - err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + err := WaitForTaskRunState(ctx, c, name, 
Running(name), "TaskRunRunning") errChan <- err }(taskrunItem.Name) } @@ -224,12 +234,12 @@ func TestPipelineTaskTimeout(t *testing.T) { } } - if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + if _, err := c.PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil { t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) } t.Logf("Waiting for PipelineRun %s with PipelineTask timeout in namespace %s to fail", pipelineRun.Name, namespace) - if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason("Failed", pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, FailedWithReason("Failed", pipelineRun.Name), "PipelineRunTimedOut"); err != nil { t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) } @@ -240,7 +250,7 @@ func TestPipelineTaskTimeout(t *testing.T) { go func(tr v1alpha1.TaskRun) { defer wg.Done() name := tr.Name - err := WaitForTaskRunState(c, name, func(ca apis.ConditionAccessor) (bool, error) { + err := WaitForTaskRunState(ctx, c, name, func(ca apis.ConditionAccessor) (bool, error) { cond := ca.GetCondition(apis.ConditionSucceeded) if cond != nil { if tr.Spec.TaskRef.Name == task1.Name && cond.Status == corev1.ConditionTrue { diff --git a/test/v1alpha1/wait.go b/test/v1alpha1/wait.go index a327fb348c1..efa9ab247cd 100644 --- a/test/v1alpha1/wait.go +++ b/test/v1alpha1/wait.go @@ -69,13 +69,13 @@ type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error) // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForTaskRunState(c *clients, name string, inState ConditionAccessorFn, desc string) error { +func WaitForTaskRunState(ctx context.Context, c *clients, name string, inState ConditionAccessorFn, desc string) error { metricName := fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.TaskRunClient.Get(name, metav1.GetOptions{}) + r, err := c.TaskRunClient.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -87,13 +87,13 @@ func WaitForTaskRunState(c *clients, name string, inState ConditionAccessorFn, d // from client every interval until inState returns `true` indicating it is done, // returns an error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. 
-func WaitForDeploymentState(c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error { +func WaitForDeploymentState(ctx context.Context, c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -105,13 +105,13 @@ func WaitForDeploymentState(c *clients, name string, namespace string, inState f // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForPodState(c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error { +func WaitForPodState(ctx context.Context, c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForPodState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -123,13 +123,13 @@ func WaitForPodState(c *clients, name string, namespace string, inState func(r * // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc string) error { +func WaitForPipelineRunState(ctx context.Context, c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc string) error { metricName := fmt.Sprintf("WaitForPipelineRunState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, polltimeout, func() (bool, error) { - r, err := c.PipelineRunClient.Get(name, metav1.GetOptions{}) + r, err := c.PipelineRunClient.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -141,13 +141,13 @@ func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, // interval until an external ip is assigned indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. 
-func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { +func WaitForServiceExternalIPState(ctx context.Context, c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } diff --git a/test/v1alpha1/wait_example_test.go b/test/v1alpha1/wait_example_test.go index 968626971c9..b801e20dad1 100644 --- a/test/v1alpha1/wait_example_test.go +++ b/test/v1alpha1/wait_example_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "time" corev1 "k8s.io/api/core/v1" @@ -39,8 +40,12 @@ type testingT interface { } func ExampleWaitForTaskRunState() { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // […] setup the test, get clients - if err := WaitForTaskRunState(c, "taskRunName", func(ca apis.ConditionAccessor) (bool, error) { + if err := WaitForTaskRunState(ctx, c, "taskRunName", func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { @@ -54,8 +59,12 @@ func ExampleWaitForTaskRunState() { } func ExampleWaitForPipelineRunState() { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // […] setup the test, get clients - if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(ca apis.ConditionAccessor) (bool, error) { + if err := WaitForPipelineRunState(ctx, c, "pipelineRunName", 1*time.Minute, func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { diff --git a/test/v1alpha1/wait_test.go b/test/v1alpha1/wait_test.go index dfdabb351f5..2c289941716 100644 --- a/test/v1alpha1/wait_test.go +++ b/test/v1alpha1/wait_test.go @@ -43,9 +43,9 @@ func TestWaitForTaskRunStateSucceed(t *testing.T) { }}, }}, } - c, cancel := fakeClients(t, d) + c, ctx, cancel := fakeClients(t, d) defer cancel() - if err := WaitForTaskRunState(c, "foo", Succeed("foo"), "TestTaskRunSucceed"); err != nil { + if err := WaitForTaskRunState(ctx, c, "foo", Succeed("foo"), "TestTaskRunSucceed"); err != nil { t.Fatal(err) } } @@ -58,9 +58,9 @@ func TestWaitForTaskRunStateFailed(t *testing.T) { }}, }}, } - c, cancel := fakeClients(t, d) + c, ctx, cancel := fakeClients(t, d) defer cancel() - err := WaitForTaskRunState(c, "foo", TaskRunFailed("foo"), "TestTaskRunFailed") + err := WaitForTaskRunState(ctx, c, "foo", TaskRunFailed("foo"), "TestTaskRunFailed") if err != nil { t.Fatal(err) } @@ -75,9 +75,9 @@ func TestWaitForPipelineRunStateSucceed(t *testing.T) { }}, }}, } - c, cancel := fakeClients(t, d) + c, ctx, cancel := fakeClients(t, d) defer cancel() - err := WaitForPipelineRunState(c, "bar", 2*time.Second, PipelineRunSucceed("bar"), "TestWaitForPipelineRunSucceed") + err := WaitForPipelineRunState(ctx, c, "bar", 2*time.Second, PipelineRunSucceed("bar"), "TestWaitForPipelineRunSucceed") if err != nil { t.Fatal(err) } @@ -92,15 +92,15 @@ func 
TestWaitForPipelineRunStateFailed(t *testing.T) { }}, }}, } - c, cancel := fakeClients(t, d) + c, ctx, cancel := fakeClients(t, d) defer cancel() - err := WaitForPipelineRunState(c, "bar", 2*time.Second, Failed("bar"), "TestWaitForPipelineRunFailed") + err := WaitForPipelineRunState(ctx, c, "bar", 2*time.Second, Failed("bar"), "TestWaitForPipelineRunFailed") if err != nil { t.Fatal(err) } } -func fakeClients(t *testing.T, d Data) (*clients, func()) { +func fakeClients(t *testing.T, d Data) (*clients, context.Context, func()) { ctx, _ := rtesting.SetupFakeContext(t) ctx, cancel := context.WithCancel(ctx) fakeClients, _ := SeedTestData(t, ctx, d) @@ -110,5 +110,5 @@ func fakeClients(t *testing.T, d Data) (*clients, func()) { PipelineRunClient: fakeClients.Pipeline.TektonV1alpha1().PipelineRuns(""), TaskClient: fakeClients.Pipeline.TektonV1alpha1().Tasks(""), TaskRunClient: fakeClients.Pipeline.TektonV1alpha1().TaskRuns(""), - }, cancel + }, ctx, cancel } diff --git a/test/v1alpha1/workingdir_test.go b/test/v1alpha1/workingdir_test.go index 78f32babefb..97307485e3a 100644 --- a/test/v1alpha1/workingdir_test.go +++ b/test/v1alpha1/workingdir_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "strings" "testing" @@ -34,16 +35,19 @@ const ( ) func TestWorkingDirCreated(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := tb.Task(wdTaskName, tb.TaskSpec( tb.Step("ubuntu", tb.StepWorkingDir("/workspace/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -51,23 +55,23 @@ func TestWorkingDirCreated(t *testing.T) { taskRun := tb.TaskRun(wdTaskRunName, tb.TaskRunSpec( tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } - tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, wdTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } if tr.Status.PodName == "" { t.Fatal("Error getting a PodName (empty)") } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } @@ -75,7 +79,7 @@ func TestWorkingDirCreated(t *testing.T) { if strings.HasPrefix(stat.Name, "working-dir-initializer") { if 
stat.State.Terminated != nil { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } @@ -88,16 +92,19 @@ func TestWorkingDirCreated(t *testing.T) { } func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := tb.Task(wdTaskName, tb.TaskSpec( tb.Step("ubuntu", tb.StepWorkingDir("/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -105,21 +112,21 @@ func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { taskRun := tb.TaskRun(wdTaskRunName, tb.TaskRunSpec( tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } - tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, wdTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } diff --git a/test/v1alpha1/workspace_test.go b/test/v1alpha1/workspace_test.go index 11498effd7f..81fa94bdcb1 100644 --- a/test/v1alpha1/workspace_test.go +++ b/test/v1alpha1/workspace_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "strings" "testing" "time" @@ -30,19 +31,22 @@ import ( ) func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "write-disallowed" taskRunName := "write-disallowed-tr" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := tb.Task(taskName, tb.TaskSpec( tb.Step("alpine", tb.StepScript("echo foo > /workspace/test/file")), tb.TaskWorkspace("test", "test workspace", "/workspace/test", true), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -50,23 +54,23 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { tb.TaskRunTaskRef(taskName), tb.TaskRunServiceAccountName("default"), tb.TaskRunWorkspaceEmptyDir("test", ""), )) - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { t.Errorf("Error waiting for TaskRun to finish with error: %s", err) } - tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } if tr.Status.PodName == "" { t.Fatal("Error getting a PodName (empty)") } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) @@ -74,7 +78,7 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { for _, stat := range p.Status.ContainerStatuses { if strings.Contains(stat.Name, "step-attempt-write") { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } @@ -86,20 +90,23 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { } func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "read-workspace" pipelineName := "read-workspace-pipeline" pipelineRunName := "read-workspace-pipelinerun" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := tb.Task(taskName, tb.TaskSpec( tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), 
tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -107,7 +114,7 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { tb.PipelineWorkspaceDeclaration("foo"), tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo", "")), )) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } @@ -119,7 +126,7 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { tb.PipelineRunWorkspaceBindingEmptyDir("foo"), ), ) - _, err := c.PipelineRunClient.Create(pipelineRun) + _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}) if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { t.Fatalf("Expected error when creating pipelinerun with duplicate workspace entries but received: %v", err) @@ -127,20 +134,23 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { } func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "read-workspace" pipelineName := "read-workspace-pipeline" pipelineRunName := "read-workspace-pipelinerun" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := tb.Task(taskName, tb.TaskSpec( tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), )) - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -148,7 +158,7 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { tb.PipelineWorkspaceDeclaration("foo"), tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo", "")), )) - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } @@ -157,11 +167,11 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { pipelineName, ), ) - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun: %s", err) } - if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) } diff --git a/test/wait.go b/test/wait.go index 
a327fb348c1..efa9ab247cd 100644 --- a/test/wait.go +++ b/test/wait.go @@ -69,13 +69,13 @@ type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error) // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForTaskRunState(c *clients, name string, inState ConditionAccessorFn, desc string) error { +func WaitForTaskRunState(ctx context.Context, c *clients, name string, inState ConditionAccessorFn, desc string) error { metricName := fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.TaskRunClient.Get(name, metav1.GetOptions{}) + r, err := c.TaskRunClient.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -87,13 +87,13 @@ func WaitForTaskRunState(c *clients, name string, inState ConditionAccessorFn, d // from client every interval until inState returns `true` indicating it is done, // returns an error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForDeploymentState(c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error { +func WaitForDeploymentState(ctx context.Context, c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -105,13 +105,13 @@ func WaitForDeploymentState(c *clients, name string, namespace string, inState f // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForPodState(c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error { +func WaitForPodState(ctx context.Context, c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForPodState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -123,13 +123,13 @@ func WaitForPodState(c *clients, name string, namespace string, inState func(r * // interval until inState returns `true` indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. 
-func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc string) error { +func WaitForPipelineRunState(ctx context.Context, c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc string) error { metricName := fmt.Sprintf("WaitForPipelineRunState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, polltimeout, func() (bool, error) { - r, err := c.PipelineRunClient.Get(name, metav1.GetOptions{}) + r, err := c.PipelineRunClient.Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } @@ -141,13 +141,13 @@ func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, // interval until an external ip is assigned indicating it is done, returns an // error or timeout. desc will be used to name the metric that is emitted to // track how long it took for name to get into the state checked by inState. -func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { +func WaitForServiceExternalIPState(ctx context.Context, c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc) _, span := trace.StartSpan(context.Background(), metricName) defer span.End() return wait.PollImmediate(interval, timeout, func() (bool, error) { - r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return true, err } diff --git a/test/wait_example_test.go b/test/wait_example_test.go index 968626971c9..b801e20dad1 100644 --- a/test/wait_example_test.go +++ b/test/wait_example_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "time" corev1 "k8s.io/api/core/v1" @@ -39,8 +40,12 @@ type testingT interface { } func ExampleWaitForTaskRunState() { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // […] setup the test, get clients - if err := WaitForTaskRunState(c, "taskRunName", func(ca apis.ConditionAccessor) (bool, error) { + if err := WaitForTaskRunState(ctx, c, "taskRunName", func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { @@ -54,8 +59,12 @@ func ExampleWaitForTaskRunState() { } func ExampleWaitForPipelineRunState() { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // […] setup the test, get clients - if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(ca apis.ConditionAccessor) (bool, error) { + if err := WaitForPipelineRunState(ctx, c, "pipelineRunName", 1*time.Minute, func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { diff --git a/test/workingdir_test.go b/test/workingdir_test.go index f828f5794cc..1cca739524a 100644 --- a/test/workingdir_test.go +++ b/test/workingdir_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package test import ( + "context" "strings" "testing" @@ -34,11 +35,14 @@ const ( ) func TestWorkingDirCreated(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: wdTaskName, Namespace: namespace}, @@ -50,7 +54,7 @@ func TestWorkingDirCreated(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -62,23 +66,23 @@ func TestWorkingDirCreated(t *testing.T) { ServiceAccountName: "default", }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } - tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, wdTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } if tr.Status.PodName == "" { t.Fatal("Error getting a PodName (empty)") } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } @@ -86,7 +90,7 @@ func TestWorkingDirCreated(t *testing.T) { if strings.HasPrefix(stat.Name, "working-dir-initializer") { if stat.State.Terminated != nil { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } @@ -99,11 +103,14 @@ func TestWorkingDirCreated(t *testing.T) { } func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) t.Parallel() - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: wdTaskName, Namespace: namespace}, @@ -115,7 +122,7 @@ func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { }}}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -127,21 +134,21 @@ func 
TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { ServiceAccountName: "default", }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) - if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + if err := WaitForTaskRunState(ctx, c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } - tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, wdTaskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) } diff --git a/test/workspace_test.go b/test/workspace_test.go index 09cd0793659..9c2d9257f4b 100644 --- a/test/workspace_test.go +++ b/test/workspace_test.go @@ -19,6 +19,7 @@ limitations under the License. package test import ( + "context" "strings" "testing" "time" @@ -30,13 +31,16 @@ import ( ) func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "write-disallowed" taskRunName := "write-disallowed-tr" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, @@ -53,7 +57,7 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { }}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -68,23 +72,23 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { }}, }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { t.Errorf("Error waiting for TaskRun to finish with error: %s", err) } - tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } if tr.Status.PodName == "" { t.Fatal("Error getting a PodName (empty)") } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) 
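The hunks above and below repeat one mechanical migration: every client-go call gains a leading context.Context argument plus an explicit options struct, and pod-log requests gain Do(ctx). A minimal sketch of that call shape follows; the stepLogs helper and its names are illustrative assumptions, not code from this change.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// stepLogs fetches one container's logs from a pod, threading the caller's ctx
// through both the Get call and the log request, mirroring the updated tests:
// Pods(ns).Get(ctx, name, metav1.GetOptions{}) and req.Do(ctx).Raw().
func stepLogs(ctx context.Context, kube kubernetes.Interface, ns, podName, container string) ([]byte, error) {
	pod, err := kube.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	req := kube.CoreV1().Pods(ns).GetLogs(pod.Name, &corev1.PodLogOptions{Container: container})
	return req.Do(ctx).Raw()
}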
@@ -92,7 +96,7 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { for _, stat := range p.Status.ContainerStatuses { if strings.Contains(stat.Name, "step-attempt-write") { req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) - logContent, err := req.Do().Raw() + logContent, err := req.Do(ctx).Raw() if err != nil { t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) } @@ -104,14 +108,17 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { } func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "read-workspace" pipelineName := "read-workspace-pipeline" pipelineRunName := "read-workspace-pipelinerun" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, @@ -128,7 +135,7 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { }}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -148,7 +155,7 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { }}, }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } @@ -165,7 +172,7 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { }}, }, } - _, err := c.PipelineRunClient.Create(pipelineRun) + _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}) if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { t.Fatalf("Expected error when creating pipelinerun with duplicate workspace entries but received: %v", err) @@ -173,14 +180,17 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { } func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "read-workspace" pipelineName := "read-workspace-pipeline" pipelineRunName := "read-workspace-pipelinerun" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, @@ -197,7 +207,7 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { }}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -217,7 +227,7 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { }}, }, } - if _, err := c.PipelineClient.Create(pipeline); err != nil { + if _, err := 
c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } @@ -227,11 +237,11 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { PipelineRef: &v1beta1.PipelineRef{Name: pipelineName}, }, } - if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + if _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create PipelineRun: %s", err) } - if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { + if err := WaitForPipelineRunState(ctx, c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline requires workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) } } @@ -240,13 +250,16 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { // randomized volume name matches the workspaces..volume variable injected into // a user's task specs. func TestWorkspaceVolumeNameMatchesVolumeVariableReplacement(t *testing.T) { - c, namespace := setup(t) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t) taskName := "foo-task" taskRunName := "foo-taskrun" - knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) - defer tearDown(t, c, namespace) + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) task := &v1beta1.Task{ ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, @@ -265,7 +278,7 @@ func TestWorkspaceVolumeNameMatchesVolumeVariableReplacement(t *testing.T) { }}, }, } - if _, err := c.TaskClient.Create(task); err != nil { + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task: %s", err) } @@ -280,23 +293,23 @@ func TestWorkspaceVolumeNameMatchesVolumeVariableReplacement(t *testing.T) { }}, }, } - if _, err := c.TaskRunClient.Create(taskRun); err != nil { + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) - if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "success"); err != nil { + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "success"); err != nil { t.Errorf("Error waiting for TaskRun to finish with error: %s", err) } - tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + tr, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) if err != nil { t.Errorf("Error retrieving taskrun: %s", err) } if tr.Status.PodName == "" { t.Fatal("Error getting a PodName (empty)") } - p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace)
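Taken together, the test changes follow a single caller-side recipe: create a cancellable context per test, pass it to setup and tearDown, and thread it through every Create, Get, and Wait call. The sketch below restates that recipe under the assumption that it lives in the same test package and reuses the helpers updated above (setup, tearDown, WaitForTaskRunState, TaskRunSucceed); the resource names are illustrative.

package test

import (
	"context"
	"testing"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestContextThreadingSketch(t *testing.T) {
	// Each test owns a cancellable context; cancelling it aborts any in-flight
	// API requests made through the clients below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, namespace := setup(ctx, t)
	defer tearDown(ctx, t, c, namespace)

	tr := &v1beta1.TaskRun{
		ObjectMeta: metav1.ObjectMeta{Name: "example-tr", Namespace: namespace},
		Spec:       v1beta1.TaskRunSpec{TaskRef: &v1beta1.TaskRef{Name: "example-task"}},
	}

	// Create and Get now take ctx plus an explicit options struct.
	if _, err := c.TaskRunClient.Create(ctx, tr, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create TaskRun: %s", err)
	}
	if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil {
		t.Fatalf("Error waiting for TaskRun to succeed: %s", err)
	}
	if _, err := c.TaskRunClient.Get(ctx, tr.Name, metav1.GetOptions{}); err != nil {
		t.Fatalf("Error retrieving TaskRun: %s", err)
	}
}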