Add initial integration test for dag tektoncd#168
Three caveats to this integration test:
- It was created before tektoncd#320, so the resource binding is not correct
- It was created before tektoncd#387, so it relies on the log PVC, which will no
  longer exist (could work around this by mounting a PVC explicitly in the test
  and writing directly to it instead of echoing; see the sketch below)
- It doesn't exercise `runAfter` functionality
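As a minimal sketch of the workaround floated in the second caveat: a validation pod could mount the PVC explicitly and write a marker file straight to the volume, rather than echoing to logs. Everything here is hypothetical (the helper name, mount path, and marker contents are not part of this commit), written in the style of the `test/crd.go` helpers further down:

```go
import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getVolumeWriterPod is a hypothetical helper (not part of this commit): it
// returns a pod that mounts an existing PVC and writes `contents` directly
// to the volume, so the test no longer depends on the log PVC.
func getVolumeWriterPod(namespace, volumeClaimName, podName, contents string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      podName,
		},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:  podName,
				Image: "busybox",
				// Write straight to the mounted volume instead of echoing to stdout.
				Command: []string{"sh", "-c", fmt.Sprintf("echo %q > /logs/out.txt", contents)},
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "logs",
					MountPath: "/logs",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "logs",
				VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
						ClaimName: volumeClaimName,
					},
				},
			}},
		},
	}
}
```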
bobcatfish committed Jan 28, 2019
1 parent 559397d commit f52acf0
Showing 6 changed files with 531 additions and 32 deletions.
9 changes: 5 additions & 4 deletions docs/using.md
@@ -1,6 +1,7 @@
 # How to use the Pipeline CRD
 
 - [How do I create a new Pipeline?](#creating-a-pipeline)
+- [What order will the tasks run in?](#expressing-task-order)
 - [How do I make a Task?](#creating-a-task)
 - [How do I make Resources?](#creating-resources)
 - [How do I run a Pipeline?](#running-a-pipeline)
@@ -13,19 +14,19 @@
    Some can be generic and reused (e.g. building with Kaniko) and others will be
    specific to your project (e.g. running your particular set of unit tests).
 2. Create a `Pipeline` which expresses the Tasks you would like to run and what
-   [Resources](#creating-resources) the Tasks need.
+   [PipelineResources](#creating-resources) the Tasks need.
    Use [`providedBy`](#providedBy) to express the order the `Tasks` should run in.
 
 See [the example Pipeline](../examples/pipeline.yaml).
 
-### ProvidedBy
+### Expressing task order
 
 When you need to execute `Tasks` in a particular order, it will likely be because they
-are operating over the same `Resources` (e.g. your unit test task must run first against
+are operating over the same `PipelineResources` (e.g. your unit test task must run first against
 your git repo, then you build an image from that repo, then you run integration tests
 against that image).
 
-We express this ordering by adding `providedBy` on `Resources` that our `Tasks`
+We express this ordering by adding `providedBy` on `PipelineResources` that our `Tasks`
 need.
 
 - The (optional) `providedBy` key on an `input source` defines a set of previous
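As context for the `providedBy` section changed above, here is a rough sketch of what this ordering looks like in a Pipeline definition. It is illustrative only: the exact resource-binding syntax predates the changes in tektoncd#320 (see the commit message caveats), and every name below is hypothetical.

```yaml
apiVersion: pipeline.knative.dev/v1alpha1
kind: Pipeline
metadata:
  name: demo-pipeline
spec:
  tasks:
  - name: unit-tests                # runs first, against the git repo
    taskRef:
      name: run-unit-tests
  - name: build-image               # may only run once unit-tests has provided the repo
    taskRef:
      name: build-push
    resources:
      inputs:
      - name: workspace             # an input source on the consuming Task
        resource: my-git-repo
        providedBy:                 # the ordering constraint described in the doc
        - unit-tests
```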
File renamed without changes.
@@ -73,24 +73,57 @@ func TestValidateResolvedTaskResources_ValidParams(t *testing.T) {
 }
 
 func TestValidateResolvedTaskResources_InvalidParams(t *testing.T) {
-	rtr := &resources.ResolvedTaskResources{
-		TaskSpec: &v1alpha1.TaskSpec{
-			Steps: validBuildSteps,
-			Inputs: &v1alpha1.Inputs{
-				Params: []v1alpha1.TaskParam{{
-					Name: "foo",
-				}, {
-					Name: "bar",
-				}},
-			},
-		},
-	}
-	p := []v1alpha1.Param{{
-		Name:  "foobar",
-		Value: "somethingfun",
+	tcs := []struct {
+		name   string
+		rtr    *resources.ResolvedTaskResources
+		params []v1alpha1.Param
+	}{{
+		name: "wrong params",
+		rtr: &resources.ResolvedTaskResources{
+			TaskSpec: &v1alpha1.TaskSpec{
+				Steps: validBuildSteps,
+				Inputs: &v1alpha1.Inputs{
+					Params: []v1alpha1.TaskParam{{
+						Name: "foo",
+					}, {
+						Name: "bar",
+					}},
+				},
+			}},
+		params: []v1alpha1.Param{{
+			Name:  "foobar",
+			Value: "somethingfun",
+		}},
+	}, {
+		name: "extra params",
+		rtr: &resources.ResolvedTaskResources{
+			TaskSpec: &v1alpha1.TaskSpec{
+				Steps: validBuildSteps,
+				Inputs: &v1alpha1.Inputs{
+					Params: []v1alpha1.TaskParam{{
+						Name: "foo",
+					}, {
+						Name: "bar",
+					}},
+				},
+			}},
+		params: []v1alpha1.Param{{
+			Name:  "foo",
+			Value: "fooooooo",
+		}, {
+			Name:  "bar",
+			Value: "barrrrrr",
+		}, {
+			Name:  "foobar",
+			Value: "somethingfun",
+		}},
 	}}
-	if err := taskrun.ValidateResolvedTaskResources(p, rtr); err == nil {
-		t.Errorf("Expected to see error when validating invalid resolved TaskRun with wrong params but saw none")
+	for _, tc := range tcs {
+		t.Run(tc.name, func(t *testing.T) {
+			if err := taskrun.ValidateResolvedTaskResources(tc.params, tc.rtr); err == nil {
+				t.Errorf("Expected to see error when validating invalid resolved TaskRun with wrong params but saw none")
+			}
+		})
 	}
 }
 
@@ -161,6 +194,34 @@ func TestValidateResolvedTaskResources_InvalidResources(t *testing.T) {
 			},
 			Outputs: map[string]*v1alpha1.PipelineResource{"testimageoutput": r},
 		},
+	}, {
+		name: "extra-output",
+		rtr: &resources.ResolvedTaskResources{
+			TaskSpec: &v1alpha1.TaskSpec{
+				Inputs: &v1alpha1.Inputs{
+					Resources: []v1alpha1.TaskResource{{
+						Name: "resource-to-build",
+						Type: v1alpha1.PipelineResourceTypeGit,
+					}},
+				},
+			},
+			Inputs:  map[string]*v1alpha1.PipelineResource{"resource-to-build": r},
+			Outputs: map[string]*v1alpha1.PipelineResource{"some-extra-output": r},
+		},
+	}, {
+		name: "extra-input",
+		rtr: &resources.ResolvedTaskResources{
+			TaskSpec: &v1alpha1.TaskSpec{
+				Outputs: &v1alpha1.Outputs{
+					Resources: []v1alpha1.TaskResource{{
+						Name: "resource-to-provide",
+						Type: v1alpha1.PipelineResourceTypeGit,
+					}},
+				},
+			},
+			Inputs:  map[string]*v1alpha1.PipelineResource{"some-extra-input": r},
+			Outputs: map[string]*v1alpha1.PipelineResource{"resource-to-provide": r},
+		},
 	}}
 
 	for _, tc := range tcs {
20 changes: 10 additions & 10 deletions test/crd.go
@@ -50,15 +50,15 @@ const (
 	buildOutput = "Build successful"
 )
 
-func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
+func getLogFetcherPod(namespace, volumeClaimName, podName string) *corev1.Pod {
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
-			Name:      hwValidationPodName,
+			Name:      podName,
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{{
-				Name:  hwValidationPodName,
+				Name:  podName,
 				Image: "busybox",
 				Command: []string{
 					"cat",
@@ -118,29 +118,29 @@ func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
 	}
 }
 
-func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, testStr string) (string, error) {
+func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, volumeClaimName, podName string) (string, error) {
 	// Create Validation Pod
 	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
 
 	// Volume created for Task should have the same name as the Task
-	if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwTaskRunName)); err != nil {
-		return "", fmt.Errorf("failed to create Validation pod to mount volume `%s`: %s", hwTaskRunName, err)
+	if _, err := pods.Create(getLogFetcherPod(namespace, volumeClaimName, podName)); err != nil {
+		return "", fmt.Errorf("failed to create Validation pod to mount volume `%s`: %s", volumeClaimName, err)
 	}
 
-	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwTaskRunName)
-	if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
+	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", volumeClaimName)
+	if err := WaitForPodState(c, podName, namespace, func(p *corev1.Pod) (bool, error) {
 		// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
 		// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
 		if p.Status.Phase == corev1.PodRunning {
 			return true, nil
 		}
 		return false, nil
 	}, "ValidationPodCompleted"); err != nil {
-		return "", fmt.Errorf("error waiting for Pod %s to finish: %s", hwValidationPodName, err)
+		return "", fmt.Errorf("error waiting for Pod %s to finish: %s", podName, err)
 	}
 
 	// Get validation pod logs and verify that the build executed a container w/ desired output
-	req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
+	req := pods.GetLogs(podName, &corev1.PodLogOptions{})
 	readCloser, err := req.Stream()
 	if err != nil {
 		return "", fmt.Errorf("failed to open stream to read: %v", err)
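A sketch of how the generalized helper above might be called from a test, based only on the signature shown in this diff. The PVC and pod names are hypothetical, and the function assumes it lives alongside test/crd.go so that `clients`, `buildOutput`, and `getBuildOutputFromVolume` are in scope:

```go
import (
	"strings"
	"testing"

	// import path assumed from the knative test helpers of this era
	"github.com/knative/pkg/test/logging"
)

// verifyBuildOutput is a hypothetical wrapper: it reads what the build wrote
// to the named PVC via a log fetcher pod and checks for the expected output.
func verifyBuildOutput(t *testing.T, logger *logging.BaseLogger, c *clients, namespace string) {
	output, err := getBuildOutputFromVolume(logger, c, namespace, "hello-world-pvc", "hello-world-log-fetcher")
	if err != nil {
		t.Fatalf("unable to get build output from volume: %s", err)
	}
	if !strings.Contains(output, buildOutput) {
		t.Errorf("expected volume contents to include %q but got %q", buildOutput, output)
	}
}
```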