fix: propagate context.Context
client-go v0.18.8 changed all client functions to take a
context.Context parameter. This commit updates the code to pass a
context.Context through each call site so that it is compatible with
client-go v0.18.8. No functional changes are included.
eddie4941 authored and tekton-robot committed Oct 6, 2020
1 parent 6b55813 commit b9a21a6
Showing 84 changed files with 1,206 additions and 916 deletions.
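Every diff below is the same mechanical rewrite: in client-go v0.18, each generated client verb (Get, List, Create, Update, Delete, Patch) takes a context.Context as its first argument and an explicit options struct, passed by value, as its last. A minimal sketch of the before/after calling convention, using an illustrative Pod lookup against the fake clientset (the names here are ours, not part of this commit):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// getPod shows the v0.18 calling convention; before v0.18 the same call was
// c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}) with no ctx argument.
func getPod(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println(pod.Name)
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The fake clientset exposes the same signatures, which is why the test
	// diffs below change in lockstep with the production code.
	if err := getPod(ctx, fake.NewSimpleClientset(), "default", "example"); err != nil {
		fmt.Println(err) // NotFound: no Pod was seeded into the fake client
	}
}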
6 changes: 3 additions & 3 deletions pkg/artifacts/artifact_storage_test.go
@@ -296,7 +296,7 @@ func TestInitializeArtifactStorage(t *testing.T) {
}
// If the expected storage type is PVC, make sure we're actually creating that PVC.
if c.storagetype == "pvc" {
- _, err := fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+ _, err := fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
if err != nil {
t.Fatalf("Error getting expected PVC %s for PipelineRun %s: %s", GetPVCName(pipelinerun), pipelinerun.Name, err)
}
@@ -456,14 +456,14 @@ func TestCleanupArtifactStorage(t *testing.T) {
ArtifactBucket: ab,
}
ctx := config.ToContext(context.Background(), &configs)
- _, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+ _, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
if err != nil {
t.Fatalf("Error getting expected PVC %s for PipelineRun %s: %s", GetPVCName(pipelinerun), pipelinerun.Name, err)
}
if err := CleanupArtifactStorage(ctx, pipelinerun, fakekubeclient); err != nil {
t.Fatalf("Error cleaning up artifact storage: %s", err)
}
- _, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(GetPVCName(pipelinerun), metav1.GetOptions{})
+ _, err = fakekubeclient.CoreV1().PersistentVolumeClaims(pipelinerun.Namespace).Get(ctx, GetPVCName(pipelinerun), metav1.GetOptions{})
if err == nil {
t.Fatalf("Found PVC %s for PipelineRun %s after it should have been cleaned up", GetPVCName(pipelinerun), pipelinerun.Name)
} else if !errors.IsNotFound(err) {
12 changes: 6 additions & 6 deletions pkg/artifacts/artifacts_storage.go
@@ -120,7 +120,7 @@ func InitializeArtifactStorage(ctx context.Context, images pipeline.Images, pr *
func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error {

if NeedsPVC(ctx) {
- err := deletePVC(pr, c)
+ err := deletePVC(ctx, pr, c)
if err != nil {
return err
}
@@ -171,7 +171,7 @@ func NewArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *s
}

func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) (*corev1.PersistentVolumeClaim, error) {
- if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(GetPVCName(pr), metav1.GetOptions{}); err != nil {
+ if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(ctx, GetPVCName(pr), metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
pvcConfig := config.FromContextOrDefaults(ctx).ArtifactPVC
pvcSize, err := resource.ParseQuantity(pvcConfig.Size)
@@ -192,7 +192,7 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf
}

pvcSpec := GetPVCSpec(pr, pvcSize, pvcStorageClassName)
- pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvcSpec)
+ pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(ctx, pvcSpec, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %w", pr.Name, err)
}
@@ -203,12 +203,12 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf
return nil, nil
}

- func deletePVC(pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
- if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(GetPVCName(pr), metav1.GetOptions{}); err != nil {
+ func deletePVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
+ if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(ctx, GetPVCName(pr), metav1.GetOptions{}); err != nil {
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get Persistent Volume %q due to error: %w", GetPVCName(pr), err)
}
- } else if err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Delete(GetPVCName(pr), &metav1.DeleteOptions{}); err != nil {
+ } else if err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Delete(ctx, GetPVCName(pr), metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("failed to delete Persistent Volume %q due to error: %w", pr.Name, err)
}
return nil
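The deletePVC change above also shows how Delete options moved from a pointer (&metav1.DeleteOptions{}) to a value. A condensed sketch of the same get-then-delete shape, assuming the same client-go version, with illustrative names:

package artifacts

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePVCIfExists mirrors deletePVC above: Get and Delete both take ctx,
// and DeleteOptions is now passed by value rather than by pointer.
func deletePVCIfExists(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	if _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{}); err != nil {
		if apierrors.IsNotFound(err) {
			return nil // nothing to delete
		}
		return fmt.Errorf("failed to get PVC %q: %w", name, err)
	}
	return c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, name, metav1.DeleteOptions{})
}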
4 changes: 2 additions & 2 deletions pkg/pod/creds_init.go
@@ -54,7 +54,7 @@ func credsInit(ctx context.Context, serviceAccountName, namespace string, kubecl
serviceAccountName = config.DefaultServiceAccountValue
}

- sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, metav1.GetOptions{})
+ sa, err := kubeclient.CoreV1().ServiceAccounts(namespace).Get(ctx, serviceAccountName, metav1.GetOptions{})
if err != nil {
return nil, nil, nil, err
}
@@ -65,7 +65,7 @@ func credsInit(ctx context.Context, serviceAccountName, namespace string, kubecl
var volumes []corev1.Volume
args := []string{}
for _, secretEntry := range sa.Secrets {
- secret, err := kubeclient.CoreV1().Secrets(namespace).Get(secretEntry.Name, metav1.GetOptions{})
+ secret, err := kubeclient.CoreV1().Secrets(namespace).Get(ctx, secretEntry.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, nil, err
}
13 changes: 7 additions & 6 deletions pkg/pod/entrypoint.go
@@ -17,6 +17,7 @@ limitations under the License.
package pod

import (
"context"
"errors"
"fmt"
"path/filepath"
@@ -162,8 +163,8 @@ func collectResultsName(results []v1beta1.TaskResult) string {

// UpdateReady updates the Pod's annotations to signal the first step to start
// by projecting the ready annotation via the Downward API.
- func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {
- newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+ func UpdateReady(ctx context.Context, kubeclient kubernetes.Interface, pod corev1.Pod) error {
+ newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting Pod %q when updating ready annotation: %w", pod.Name, err)
}
@@ -175,7 +176,7 @@ func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {
}
if newPod.ObjectMeta.Annotations[readyAnnotation] != readyAnnotationValue {
newPod.ObjectMeta.Annotations[readyAnnotation] = readyAnnotationValue
- if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil {
+ if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(ctx, newPod, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("error adding ready annotation to Pod %q: %w", pod.Name, err)
}
}
@@ -184,8 +185,8 @@ func UpdateReady(kubeclient kubernetes.Interface, pod corev1.Pod) error {

// StopSidecars updates sidecar containers in the Pod to a nop image, which
// exits successfully immediately.
- func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.Pod) error {
- newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+ func StopSidecars(ctx context.Context, nopImage string, kubeclient kubernetes.Interface, pod corev1.Pod) error {
+ newPod, err := kubeclient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting Pod %q when stopping sidecars: %w", pod.Name, err)
}
@@ -208,7 +209,7 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P
}
}
if updated {
- if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil {
+ if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(ctx, newPod, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("error stopping sidecars of Pod %q: %w", pod.Name, err)
}
}
7 changes: 4 additions & 3 deletions pkg/pod/entrypoint_lookup.go
@@ -17,6 +17,7 @@ limitations under the License.
package pod

import (
"context"
"fmt"

"github.com/google/go-containerregistry/pkg/name"
@@ -30,7 +31,7 @@ type EntrypointCache interface {
// Get the Image data for the given image reference. If the value is
// not found in the cache, it will be fetched from the image registry,
// possibly using K8s service account imagePullSecrets.
- Get(ref name.Reference, namespace, serviceAccountName string) (v1.Image, error)
+ Get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string) (v1.Image, error)
// Update the cache with a new digest->Image mapping. This will avoid a
// remote registry lookup next time Get is called.
Set(digest name.Digest, img v1.Image)
@@ -41,7 +42,7 @@ type EntrypointCache interface {
//
// Images that are not specified by digest will be specified by digest after
// lookup in the resulting list of containers.
- func resolveEntrypoints(cache EntrypointCache, namespace, serviceAccountName string, steps []corev1.Container) ([]corev1.Container, error) {
+ func resolveEntrypoints(ctx context.Context, cache EntrypointCache, namespace, serviceAccountName string, steps []corev1.Container) ([]corev1.Container, error) {
// Keep a local cache of name->image lookups, just for the scope of
// resolving this set of steps. If the image is pushed to before the
// next run, we need to resolve its digest and entrypoint again, but we
@@ -63,7 +64,7 @@ func resolveEntrypoints(cache EntrypointCache, namespace, serviceAccountName str
} else {
// Look it up in the cache. If it's not found in the
// cache, it will be resolved from the registry.
- img, err = cache.Get(origRef, namespace, serviceAccountName)
+ img, err = cache.Get(ctx, origRef, namespace, serviceAccountName)
if err != nil {
return nil, err
}
5 changes: 3 additions & 2 deletions pkg/pod/entrypoint_lookup_impl.go
@@ -17,6 +17,7 @@ limitations under the License.
package pod

import (
"context"
"fmt"

"github.com/google/go-containerregistry/pkg/authn"
@@ -48,7 +49,7 @@ func NewEntrypointCache(kubeclient kubernetes.Interface) (EntrypointCache, error
}, nil
}

- func (e *entrypointCache) Get(ref name.Reference, namespace, serviceAccountName string) (v1.Image, error) {
+ func (e *entrypointCache) Get(ctx context.Context, ref name.Reference, namespace, serviceAccountName string) (v1.Image, error) {
// If image is specified by digest, check the local cache.
if digest, ok := ref.(name.Digest); ok {
if img, ok := e.lru.Get(digest.String()); ok {
@@ -59,7 +60,7 @@ func (e *entrypointCache) Get(ref name.Reference, namespace, serviceAccountName
// If the image wasn't specified by digest, or if the entrypoint
// wasn't found, we have to consult the remote registry, using
// imagePullSecrets.
- kc, err := k8schain.New(e.kubeclient, k8schain.Options{
+ kc, err := k8schain.New(ctx, e.kubeclient, k8schain.Options{
Namespace: namespace,
ServiceAccountName: serviceAccountName,
})
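Note that ctx also flows into go-containerregistry's k8schain here, since k8schain.New gained a context parameter in the version this commit pins. A sketch of a registry-backed entrypoint lookup with ctx threaded end to end — the function and variable names are illustrative, not part of this commit:

package pod

import (
	"context"

	"github.com/google/go-containerregistry/pkg/authn/k8schain"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"k8s.io/client-go/kubernetes"
)

// lookupEntrypoint resolves an image's entrypoint from its registry config,
// authenticating with the service account's imagePullSecrets. ctx bounds
// both the Kubernetes reads inside k8schain and the remote registry fetch.
func lookupEntrypoint(ctx context.Context, client kubernetes.Interface, ns, sa, image string) ([]string, error) {
	ref, err := name.ParseReference(image)
	if err != nil {
		return nil, err
	}
	kc, err := k8schain.New(ctx, client, k8schain.Options{
		Namespace:          ns,
		ServiceAccountName: sa,
	})
	if err != nil {
		return nil, err
	}
	img, err := remote.Image(ref, remote.WithAuthFromKeychain(kc), remote.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	cfg, err := img.ConfigFile()
	if err != nil {
		return nil, err
	}
	return cfg.Config.Entrypoint, nil
}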
9 changes: 7 additions & 2 deletions pkg/pod/entrypoint_lookup_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package pod

import (
"context"
"fmt"
"testing"

@@ -30,6 +31,10 @@ import (
)

func TestResolveEntrypoints(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()

// Generate a random image with entrypoint configured.
img, err := random.Image(1, 1)
if err != nil {
@@ -53,7 +58,7 @@
"gcr.io/my/image:latest": &data{img: img},
}

- got, err := resolveEntrypoints(cache, "namespace", "serviceAccountName", []corev1.Container{{
+ got, err := resolveEntrypoints(ctx, cache, "namespace", "serviceAccountName", []corev1.Container{{
// This step specifies its command, so there's nothing to
// resolve.
Image: "fully-specified",
@@ -110,7 +115,7 @@ type data struct {
seen bool // Whether the image has been looked up before.
}

- func (f fakeCache) Get(ref name.Reference, _, _ string) (v1.Image, error) {
+ func (f fakeCache) Get(ctx context.Context, ref name.Reference, _, _ string) (v1.Image, error) {
if d, ok := ref.(name.Digest); ok {
if data, found := f[d.String()]; found {
return data.img, nil
15 changes: 11 additions & 4 deletions pkg/pod/entrypoint_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package pod

import (
"context"
"testing"
"time"

@@ -296,12 +297,15 @@ func TestUpdateReady(t *testing.T) {
},
}} {
t.Run(c.desc, func(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
kubeclient := fakek8s.NewSimpleClientset(&c.pod)
- if err := UpdateReady(kubeclient, c.pod); err != nil {
+ if err := UpdateReady(ctx, kubeclient, c.pod); err != nil {
t.Errorf("UpdateReady: %v", err)
}

- got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(c.pod.Name, metav1.GetOptions{})
+ got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(ctx, c.pod.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Getting pod %q after update: %v", c.pod.Name, err)
} else if d := cmp.Diff(c.wantAnnotations, got.Annotations); d != "" {
@@ -411,12 +415,15 @@ func TestStopSidecars(t *testing.T) {
wantContainers: []corev1.Container{stepContainer, sidecarContainer, injectedSidecar},
}} {
t.Run(c.desc, func(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
kubeclient := fakek8s.NewSimpleClientset(&c.pod)
- if err := StopSidecars(nopImage, kubeclient, c.pod); err != nil {
+ if err := StopSidecars(ctx, nopImage, kubeclient, c.pod); err != nil {
t.Errorf("error stopping sidecar: %v", err)
}

- got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(c.pod.Name, metav1.GetOptions{})
+ got, err := kubeclient.CoreV1().Pods(c.pod.Namespace).Get(ctx, c.pod.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Getting pod %q after update: %v", c.pod.Name, err)
} else if d := cmp.Diff(c.wantContainers, got.Spec.Containers); d != "" {
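The three-line context setup repeated in these tests could equally be factored into a helper. A sketch — the helper is hypothetical, not part of this commit, and t.Cleanup requires Go 1.14+:

package pod

import (
	"context"
	"testing"
)

// testContext captures the setup idiom the tests above repeat: derive a
// cancellable context and cancel it when the test finishes, so anything
// watching ctx.Done() is released promptly.
func testContext(t *testing.T) context.Context {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	return ctx
}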
8 changes: 4 additions & 4 deletions pkg/pod/pod.go
@@ -134,7 +134,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
}

// Resolve entrypoint for any steps that don't specify command.
- stepContainers, err = resolveEntrypoints(b.EntrypointCache, taskRun.Namespace, taskRun.Spec.ServiceAccountName, stepContainers)
+ stepContainers, err = resolveEntrypoints(ctx, b.EntrypointCache, taskRun.Namespace, taskRun.Spec.ServiceAccountName, stepContainers)
if err != nil {
return nil, err
}
@@ -148,7 +148,7 @@
initContainers = append(initContainers, entrypointInit)
volumes = append(volumes, toolsVolume, downwardVolume)

- limitRangeMin, err := getLimitRangeMinimum(taskRun.Namespace, b.KubeClient)
+ limitRangeMin, err := getLimitRangeMinimum(ctx, taskRun.Namespace, b.KubeClient)
if err != nil {
return nil, err
}
@@ -337,8 +337,8 @@ func nodeAffinityUsingAffinityAssistant(affinityAssistantName string) *corev1.Af
// https://github.com/kubernetes/kubernetes/issues/79496, the
// max LimitRange minimum must be found in the event of conflicting
// container minimums specified.
- func getLimitRangeMinimum(namespace string, kubeclient kubernetes.Interface) (corev1.ResourceList, error) {
- limitRanges, err := kubeclient.CoreV1().LimitRanges(namespace).List(metav1.ListOptions{})
+ func getLimitRangeMinimum(ctx context.Context, namespace string, kubeclient kubernetes.Interface) (corev1.ResourceList, error) {
+ limitRanges, err := kubeclient.CoreV1().LimitRanges(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
6 changes: 3 additions & 3 deletions pkg/reconciler/pipelinerun/affinity_assistant.go
@@ -53,12 +53,12 @@ func (c *Reconciler) createAffinityAssistants(ctx context.Context, wb []v1beta1.
for _, w := range wb {
if w.PersistentVolumeClaim != nil || w.VolumeClaimTemplate != nil {
affinityAssistantName := getAffinityAssistantName(w.Name, pr.Name)
- _, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Get(affinityAssistantName, metav1.GetOptions{})
+ _, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Get(ctx, affinityAssistantName, metav1.GetOptions{})
claimName := getClaimName(w, pr.GetOwnerReference())
switch {
case apierrors.IsNotFound(err):
affinityAssistantStatefulSet := affinityAssistantStatefulSet(affinityAssistantName, pr, claimName, c.Images.NopImage)
- _, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Create(affinityAssistantStatefulSet)
+ _, err := c.KubeClientSet.AppsV1().StatefulSets(namespace).Create(ctx, affinityAssistantStatefulSet, metav1.CreateOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("failed to create StatefulSet %s: %s", affinityAssistantName, err))
}
@@ -94,7 +94,7 @@ func (c *Reconciler) cleanupAffinityAssistants(ctx context.Context, pr *v1beta1.
for _, w := range pr.Spec.Workspaces {
if w.PersistentVolumeClaim != nil || w.VolumeClaimTemplate != nil {
affinityAssistantStsName := getAffinityAssistantName(w.Name, pr.Name)
- if err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Delete(affinityAssistantStsName, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+ if err := c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Delete(ctx, affinityAssistantStsName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
errs = append(errs, fmt.Errorf("failed to delete StatefulSet %s: %s", affinityAssistantStsName, err))
}
}
12 changes: 8 additions & 4 deletions pkg/reconciler/pipelinerun/affinity_assistant_test.go
@@ -34,6 +34,10 @@ import (
// TestCreateAndDeleteOfAffinityAssistant tests to create and delete an Affinity Assistant
// for a given PipelineRun with a PVC workspace
func TestCreateAndDeleteOfAffinityAssistant(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()

c := Reconciler{
KubeClientSet: fakek8s.NewSimpleClientset(),
Images: pipeline.Images{},
@@ -56,23 +60,23 @@ func TestCreateAndDeleteOfAffinityAssistant(t *testing.T) {
},
}

- err := c.createAffinityAssistants(context.Background(), testPipelineRun.Spec.Workspaces, testPipelineRun, testPipelineRun.Namespace)
+ err := c.createAffinityAssistants(ctx, testPipelineRun.Spec.Workspaces, testPipelineRun, testPipelineRun.Namespace)
if err != nil {
t.Errorf("unexpected error from createAffinityAssistants: %v", err)
}

expectedAffinityAssistantName := getAffinityAssistantName(workspaceName, testPipelineRun.Name)
- _, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(expectedAffinityAssistantName, metav1.GetOptions{})
+ _, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(ctx, expectedAffinityAssistantName, metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error when retrieving StatefulSet: %v", err)
}

- err = c.cleanupAffinityAssistants(context.Background(), testPipelineRun)
+ err = c.cleanupAffinityAssistants(ctx, testPipelineRun)
if err != nil {
t.Errorf("unexpected error from cleanupAffinityAssistants: %v", err)
}

- _, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(expectedAffinityAssistantName, metav1.GetOptions{})
+ _, err = c.KubeClientSet.AppsV1().StatefulSets(testPipelineRun.Namespace).Get(ctx, expectedAffinityAssistantName, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
t.Errorf("expected a NotFound response, got: %v", err)
}
5 changes: 3 additions & 2 deletions pkg/reconciler/pipelinerun/cancel.go
@@ -17,6 +17,7 @@ limitations under the License.
package pipelinerun

import (
"context"
"encoding/json"
"fmt"
"log"
@@ -48,15 +49,15 @@ func init() {
}

// cancelPipelineRun marks the PipelineRun as cancelled and any resolved TaskRun(s) too.
- func cancelPipelineRun(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error {
+ func cancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error {
errs := []string{}

// Loop over the TaskRuns in the PipelineRun status.
// If a TaskRun is not in the status yet we should not cancel it anyways.
for taskRunName := range pr.Status.TaskRuns {
logger.Infof("cancelling TaskRun %s", taskRunName)

- if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(taskRunName, types.JSONPatchType, cancelPatchBytes, ""); err != nil {
+ if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, taskRunName, types.JSONPatchType, cancelPatchBytes, metav1.PatchOptions{}, ""); err != nil {
errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error())
continue
}
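Patch changes shape the most: ctx moves to the front and metav1.PatchOptions slots in before the variadic subresource names (the trailing "" above). A sketch of the same call shape against the core API, with an illustrative JSON patch rather than Tekton's cancelPatchBytes:

package pipelinerun

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// annotatePod demonstrates the v0.18 Patch shape: ctx first, then name,
// patch type, patch body, PatchOptions by value, then any subresources.
// The patch body and annotation key are illustrative only, and the "add"
// op assumes the Pod already has an annotations map.
func annotatePod(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	patch := []byte(`[{"op":"add","path":"/metadata/annotations/example.dev~1state","value":"cancelled"}]`)
	_, err := c.CoreV1().Pods(ns).Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}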
(Diffs for the remaining changed files are not shown.)
