From ac1b453e3ca305c04e338fa7ad25bdd537cf27a7 Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Fri, 21 Dec 2018 12:01:56 +0100 Subject: [PATCH] =?UTF-8?q?Add=20possibility=20for=20a=20client=20to=20can?= =?UTF-8?q?cel=20a=20build=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … by updating the status of a build. It will mark the build as failed and clean any pods related to it. Signed-off-by: Vincent Demeester --- pkg/apis/build/v1alpha1/build_types.go | 2 + pkg/reconciler/build/build.go | 38 +++++++++++++++- pkg/reconciler/build/build_test.go | 62 ++++++++++++++++++++++++++ test/e2e/simple_test.go | 47 +++++++++++++++++++ 4 files changed, 148 insertions(+), 1 deletion(-) diff --git a/pkg/apis/build/v1alpha1/build_types.go b/pkg/apis/build/v1alpha1/build_types.go index 6d5c8296..938e6330 100644 --- a/pkg/apis/build/v1alpha1/build_types.go +++ b/pkg/apis/build/v1alpha1/build_types.go @@ -283,6 +283,8 @@ type GoogleSpec struct { // will be False. 
const BuildSucceeded = duckv1alpha1.ConditionSucceeded +const BuildCancelled duckv1alpha1.ConditionType = "Cancelled" + var buildCondSet = duckv1alpha1.NewBatchConditionSet() // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index 22011ede..62fe6275 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -153,6 +153,36 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { return nil } + // If the build's status is cancelled, kill resources and update status + if isCancelled(&build.Status) { + logger.Warnf("Build has been cancelled: %v", build.Name) + build.Status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildCancelled", + Message: fmt.Sprintf("Build %q was cancelled", build.Name), + }) + if err := c.updateStatus(build); err != nil { + return err + } + if build.Status.Cluster == nil { + logger.Warnf("build %q has no pod running yet", build.Name) + return nil + } + p, err := c.kubeclientset.CoreV1().Pods(build.Namespace).Get(build.Status.Cluster.PodName, metav1.GetOptions{}) + if err != nil { + return err + } + if p == nil { + logger.Warnf("build %q has no pod running", build.Name) + return nil + } + if err := c.kubeclientset.CoreV1().Pods(build.Namespace).Delete(p.Name, &metav1.DeleteOptions{}); err != nil { + return err + } + return nil + } + // If the build hasn't started yet, validate it and create a Pod for it // and record that pod's name in the build status. var p *corev1.Pod @@ -261,12 +291,18 @@ func (c *Reconciler) startPodForBuild(build *v1alpha1.Build) (*corev1.Pod, error return c.kubeclientset.CoreV1().Pods(p.Namespace).Create(p) } -// IsDone returns true if the build's status indicates the build is done. +// isDone returns true if the build's status indicates the build is done. 
func isDone(status *v1alpha1.BuildStatus) bool { cond := status.GetCondition(v1alpha1.BuildSucceeded) return cond != nil && cond.Status != corev1.ConditionUnknown } +// isCancelled returns true if the build's status indicates the build is cancelled. +func isCancelled(status *v1alpha1.BuildStatus) bool { + cond := status.GetCondition(v1alpha1.BuildCancelled) + return cond != nil && cond.Status == corev1.ConditionTrue +} + func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { // If build has not started timeout, startTime should be zero. if build.Status.StartTime.IsZero() { diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 10ddfcaa..bb92d494 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -561,3 +561,65 @@ func TestTimeoutFlow(t *testing.T) { t.Errorf("Unexpected build status %s", d) } } + +func TestCancelledFlow(t *testing.T) { + b := newBuild("cancelled") + + f := &fixture{ + t: t, + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), + kubeclient: k8sfake.NewSimpleClientset(), + } + f.createServiceAccount() + + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) + stopCh := make(chan struct{}) + defer close(stopCh) + i.Start(stopCh) + k8sI.Start(stopCh) + + ctx := context.Background() + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { + t.Errorf("Not Expect error when syncing build") + } + + buildClient := f.client.BuildV1alpha1().Builds(b.Namespace) + b, err := buildClient.Get(b.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("error fetching build: %v", err) + } + + // Get pod info + b, err = buildClient.Get(b.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting build: %v", err) + } + if b.Status.Cluster == nil || b.Status.Cluster.PodName == "" { + t.Fatalf("build status did not specify podName: %v", b.Status.Cluster) + } + b.Status.Conditions = []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildCancelled, + Status: 
corev1.ConditionTrue, + }} + + f.updateIndex(i, b) + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { + t.Errorf("error syncing build: %v", err) + } + + // Check that the build has the expected cancelled status. + b, err = buildClient.Get(b.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("error fetching build: %v", err) + } + if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ + Type: duckv1alpha1.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildCancelled", + Message: fmt.Sprintf("Build %q was cancelled", b.Name), + }, ignoreVolatileTime); d != "" { + t.Errorf("Unexpected build status %s", d) + } +} diff --git a/test/e2e/simple_test.go b/test/e2e/simple_test.go index 8465f376..459593d7 100644 --- a/test/e2e/simple_test.go +++ b/test/e2e/simple_test.go @@ -605,3 +605,50 @@ func TestSimpleBuildWithHybridSources(t *testing.T) { t.Fatalf("Error watching build: %v", err) } } + +// TestCancelledBuild creates a build with a long running step +// so that we have time to cancel it. +func TestCancelledBuild(t *testing.T) { + logger := logging.GetContextLogger("TestCancelledBuild") + + clients := buildClients(logger) + buildName := "cancelled-build" + + test.CleanupOnInterrupt(func() { teardownBuild(clients, logger, buildName) }, logger) + defer teardownBuild(clients, logger, buildName) + + if _, err := clients.buildClient.builds.Create(&v1alpha1.Build{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: buildTestNamespace, + Name: buildName, + }, + Spec: v1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Image: "busybox", + Args: []string{"sleep", "5000"}, // long running so the build can be cancelled. 
+ }}, + }, + }); err != nil { + t.Fatalf("Error creating build: %v", err) + } + + // Wait for a little while + // FIXME(vdemeester) I would prefer something less flaky-prone + time.Sleep(20 * time.Second) + + b, err := clients.buildClient.builds.Get(buildName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting build %s: %v", buildName, err) + } + b.Status.Conditions = append(b.Status.Conditions, duckv1alpha1.Condition{ + Type: v1alpha1.BuildCancelled, + Status: corev1.ConditionTrue, + }) + if _, err := clients.buildClient.builds.Update(b); err != nil { + t.Fatalf("Error update build status for %s: %v", buildName, err) + } + + if _, err := clients.buildClient.watchBuild(buildName); err == nil { + t.Fatalf("watchBuild did not return expected `cancelled` error") + } +}