Skip to content

Commit

Permalink
Add tekton tasks for eks-cluster-create/teardown and kaniko (#866)
Browse files Browse the repository at this point in the history
* Add tekton tasks for eks-cluster-create/teardown and kaniko

* * increase default k8s version of eks-cluster-create task
* add sample file for aws-credentials secret

* rerun make optional-generate and make optional-test
  • Loading branch information
theofpa authored Feb 1, 2021
1 parent de0c878 commit fc5a44d
Show file tree
Hide file tree
Showing 7 changed files with 383 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: eks-cluster-create
  labels:
    app.kubernetes.io/version: "0.1"
  annotations:
    tekton.dev/pipelines.minVersion: "0.12.1"
    tekton.dev/tags: "aws, eks"
    tekton.dev/displayName: "EKS Cluster Create"
spec:
  description: |
    Create an EKS cluster.
    This Task can be used to create an EKS cluster in an AWS account and fetch a kubeconfig that
    can be used (in a context with kubectl) to make requests to the cluster.
  params:
    - name: cluster-name
      description: The name of the EKS cluster you want to spin.
    - name: version
      # Quoted so YAML does not parse it as the float 1.18.
      default: "1.18"
      description: The EKS version to install.
    - name: region
      default: us-west-2
      description: The region where the cluster is in.
    - name: zones
      default: us-west-2a,us-west-2b,us-west-2c
      description: The zones where the cluster is in.
    - name: nodegroup-name
      default: linux-nodes
      description: The name of the nodegroup of the cluster.
    - name: node-type
      default: m5.xlarge
      description: The type of the EC2 instances for the nodegroup of the cluster.
    - name: desired-nodes
      default: "4"
      description: The desired number of nodes in the cluster.
    - name: min-nodes
      default: "1"
      description: The minimum number of nodes in the cluster.
    - name: max-nodes
      default: "4"
      description: The maximum number of nodes in the cluster.
  workspaces:
    # Mounted at the AWS CLI/eksctl default credentials location so the
    # secret's `credentials`/`config` keys are picked up automatically.
    - name: secrets
      mountPath: /tekton/home/.aws
      description: The secret with the AWS keys
    - name: kubeconfig
      description: |
        A workspace into which a kubeconfig file called `kubeconfig` will be written that will contain the information required to access the cluster. The `kubeconfig` will expect to use [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator/) to authenticate, so in order for it to be used it must be run in a container which contains both `kubectl` and `aws-iam-authenticator`.
  steps:
    - name: write-kubeconfig
      image: weaveworks/eksctl:0.35.0@sha256:48c1fa508970a01fd87a73ac7932a7160479d678cd019a3c84533d911fc54327
      script: |
        # Abort on the first failing command so the Step (and TaskRun) is
        # marked failed when `eksctl create cluster` fails; the previous
        # `if [ $? -eq 0 ]` guard let a failed create exit with status 0.
        set -e
        echo "Starting to create eks cluster"
        eksctl create cluster \
          --name $(params.cluster-name) \
          --version $(params.version) \
          --region $(params.region) \
          --zones $(params.zones) \
          --nodegroup-name $(params.nodegroup-name) \
          --node-type $(params.node-type) \
          --nodes $(params.desired-nodes) \
          --nodes-min $(params.min-nodes) \
          --nodes-max $(params.max-nodes)
        echo "Successfully created eks cluster $(params.cluster-name)"
        # Write the kubeconfig into the shared workspace for downstream Tasks.
        eksctl utils write-kubeconfig -c $(params.cluster-name) --kubeconfig $(workspaces.kubeconfig.path)/kubeconfig
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: eks-cluster-teardown
  labels:
    app.kubernetes.io/version: "0.1"
  annotations:
    tekton.dev/pipelines.minVersion: "0.12.1"
    tekton.dev/tags: "aws, eks"
    tekton.dev/displayName: "EKS Cluster Teardown"
spec:
  description: |
    Teardown an EKS cluster.
    This Task can be used to teardown an EKS cluster in an AWS account.
  params:
    - name: cluster-name
      description: The name of the EKS cluster which will be torn down.
    - name: region
      default: us-west-2
      description: The region where the cluster is in.
  workspaces:
    # Mounted at the AWS CLI/eksctl default credentials location so the
    # secret's `credentials`/`config` keys are picked up automatically.
    - name: secrets
      mountPath: /tekton/home/.aws
      description: The service account with the AWS keys
  steps:
    - name: delete-cluster
      image: weaveworks/eksctl:0.35.0@sha256:48c1fa508970a01fd87a73ac7932a7160479d678cd019a3c84533d911fc54327
      script: |
        # Abort on the first failing command so the Step (and TaskRun) is
        # marked failed when `eksctl delete cluster` fails; the previous
        # `if [ $? -eq 0 ]` guard let a failed delete exit with status 0.
        set -e
        echo "Tearing down the eks cluster"
        eksctl delete cluster \
          --name $(params.cluster-name) \
          --region $(params.region)
        echo "Successfully teared down eks cluster $(params.cluster-name)"
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: kaniko
spec:
  description: >-
    This Task builds source into a container image using Google's kaniko tool.
    Kaniko doesn't depend on a Docker daemon and executes each
    command within a Dockerfile completely in userspace. This enables
    building container images in environments that can't easily or
    securely run a Docker daemon, such as a standard Kubernetes cluster.
  params:
    - name: IMAGE
      description: Name (reference) of the image to build.
    - name: DOCKERFILE
      description: Path to the Dockerfile to build.
      default: ./Dockerfile
    - name: CONTEXT
      description: The build context used by Kaniko.
      default: ./
    - name: SUBPATH
      description: Sub path within the given context.
      default: ""
    - name: EXTRA_ARGS
      description: Additional arguments passed verbatim to the kaniko executor.
      default: ""
    - name: BUILDER_IMAGE
      description: The image on which builds will run
      # custom build while pending PRs awslabs/amazon-ecr-credential-helper#253, GoogleContainerTools/kaniko#1515 and #1543
      default: docker.io/theofpa/executor@sha256:e968634a9c63f3a16c8ef331749115cb6cbcde4d1726c28e3e0b4e5eb79aa4aa
  workspaces:
    - name: source
    - name: aws-credentials
      mountPath: /tekton/home/.aws/
    - name: docker-config
      mountPath: /tekton/home/.docker/
  results:
    - name: IMAGE-DIGEST
      description: Digest of the image just built.

  steps:
    - name: build-and-push
      workingDir: $(workspaces.source.path)
      image: $(params.BUILDER_IMAGE)
      # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
      # https://github.com/tektoncd/pipeline/pull/706
      env:
        - name: DOCKER_CONFIG
          value: /tekton/home/.docker
      command:
        - /kaniko/executor
        # NOTE(review): the whole param is passed as a single argv element, so
        # an empty EXTRA_ARGS yields an empty-string argument and multiple
        # flags cannot be split — consider an array param; verify upstream.
        - $(params.EXTRA_ARGS)
        - --dockerfile=$(params.DOCKERFILE)
        - --context=$(params.CONTEXT) # The user does not need to care the workspace and the source.
        - --context-sub-path=$(params.SUBPATH)
        - --destination=$(params.IMAGE)
        - --oci-layout-path=$(workspaces.source.path)/image-digest
      # kaniko assumes it is running as root, which means this example fails on platforms
      # that default to run containers as random uid (like OpenShift). Adding this securityContext
      # makes it explicit that it needs to run as root.
      securityContext:
        runAsUser: 0
    - name: write-digest
      workingDir: $(workspaces.source.path)
      image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/imagedigestexporter:v0.16.2
      # output of imagedigestexport [{"key":"digest","value":"sha256:eed29..660","resourceRef":{"name":"myrepo/myimage"}}]
      command: ["/ko-app/imagedigestexporter"]
      args:
        - -images=[{"name":"$(params.IMAGE)","type":"image","url":"$(params.IMAGE)","digest":"","OutputImageDir":"$(workspaces.source.path)/image-digest"}]
        - -terminationMessagePath=image-digested
      securityContext:
        runAsUser: 0
    - name: digest-to-results
      workingDir: $(workspaces.source.path)
      image: docker.io/stedolan/jq@sha256:a61ed0bca213081b64be94c5e1b402ea58bc549f457c2682a86704dd55231e09
      script: |
        # Extract the digest emitted by imagedigestexporter and publish it
        # as the Task result IMAGE-DIGEST.
        cat image-digested | jq '.[0].value' -rj | tee /tekton/results/IMAGE-DIGEST
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: eks-cluster-create
  labels:
    app.kubernetes.io/version: "0.1"
  annotations:
    tekton.dev/pipelines.minVersion: "0.12.1"
    tekton.dev/tags: "aws, eks"
    tekton.dev/displayName: "EKS Cluster Create"
spec:
  description: |
    Create an EKS cluster.
    This Task can be used to create an EKS cluster in an AWS account and fetch a kubeconfig that
    can be used (in a context with kubectl) to make requests to the cluster.
  params:
    - name: cluster-name
      description: The name of the EKS cluster you want to spin.
    - name: version
      # Quoted so YAML does not parse it as the float 1.18.
      default: "1.18"
      description: The EKS version to install.
    - name: region
      default: us-west-2
      description: The region where the cluster is in.
    - name: zones
      default: us-west-2a,us-west-2b,us-west-2c
      description: The zones where the cluster is in.
    - name: nodegroup-name
      default: linux-nodes
      description: The name of the nodegroup of the cluster.
    - name: node-type
      default: m5.xlarge
      description: The type of the EC2 instances for the nodegroup of the cluster.
    - name: desired-nodes
      default: "4"
      description: The desired number of nodes in the cluster.
    - name: min-nodes
      default: "1"
      description: The minimum number of nodes in the cluster.
    - name: max-nodes
      default: "4"
      description: The maximum number of nodes in the cluster.
  workspaces:
    # Mounted at the AWS CLI/eksctl default credentials location so the
    # secret's `credentials`/`config` keys are picked up automatically.
    - name: secrets
      mountPath: /tekton/home/.aws
      description: The secret with the AWS keys
    - name: kubeconfig
      description: |
        A workspace into which a kubeconfig file called `kubeconfig` will be written that will contain the information required to access the cluster. The `kubeconfig` will expect to use [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator/) to authenticate, so in order for it to be used it must be run in a container which contains both `kubectl` and `aws-iam-authenticator`.
  steps:
    - name: write-kubeconfig
      image: weaveworks/eksctl:0.35.0@sha256:48c1fa508970a01fd87a73ac7932a7160479d678cd019a3c84533d911fc54327
      script: |
        # Abort on the first failing command so the Step (and TaskRun) is
        # marked failed when `eksctl create cluster` fails; the previous
        # `if [ $? -eq 0 ]` guard let a failed create exit with status 0.
        set -e
        echo "Starting to create eks cluster"
        eksctl create cluster \
          --name $(params.cluster-name) \
          --version $(params.version) \
          --region $(params.region) \
          --zones $(params.zones) \
          --nodegroup-name $(params.nodegroup-name) \
          --node-type $(params.node-type) \
          --nodes $(params.desired-nodes) \
          --nodes-min $(params.min-nodes) \
          --nodes-max $(params.max-nodes)
        echo "Successfully created eks cluster $(params.cluster-name)"
        # Write the kubeconfig into the shared workspace for downstream Tasks.
        eksctl utils write-kubeconfig -c $(params.cluster-name) --kubeconfig $(workspaces.kubeconfig.path)/kubeconfig
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: eks-cluster-teardown
  labels:
    app.kubernetes.io/version: "0.1"
  annotations:
    tekton.dev/pipelines.minVersion: "0.12.1"
    tekton.dev/tags: "aws, eks"
    tekton.dev/displayName: "EKS Cluster Teardown"
spec:
  description: |
    Teardown an EKS cluster.
    This Task can be used to teardown an EKS cluster in an AWS account.
  params:
    - name: cluster-name
      description: The name of the EKS cluster which will be torn down.
    - name: region
      default: us-west-2
      description: The region where the cluster is in.
  workspaces:
    # Mounted at the AWS CLI/eksctl default credentials location so the
    # secret's `credentials`/`config` keys are picked up automatically.
    - name: secrets
      mountPath: /tekton/home/.aws
      description: The service account with the AWS keys
  steps:
    - name: delete-cluster
      image: weaveworks/eksctl:0.35.0@sha256:48c1fa508970a01fd87a73ac7932a7160479d678cd019a3c84533d911fc54327
      script: |
        # Abort on the first failing command so the Step (and TaskRun) is
        # marked failed when `eksctl delete cluster` fails; the previous
        # `if [ $? -eq 0 ]` guard let a failed delete exit with status 0.
        set -e
        echo "Tearing down the eks cluster"
        eksctl delete cluster \
          --name $(params.cluster-name) \
          --region $(params.region)
        echo "Successfully teared down eks cluster $(params.cluster-name)"
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: kaniko
spec:
  description: >-
    This Task builds source into a container image using Google's kaniko tool.
    Kaniko doesn't depend on a Docker daemon and executes each
    command within a Dockerfile completely in userspace. This enables
    building container images in environments that can't easily or
    securely run a Docker daemon, such as a standard Kubernetes cluster.
  params:
    - name: IMAGE
      description: Name (reference) of the image to build.
    - name: DOCKERFILE
      description: Path to the Dockerfile to build.
      default: ./Dockerfile
    - name: CONTEXT
      description: The build context used by Kaniko.
      default: ./
    - name: SUBPATH
      description: Sub path within the given context.
      default: ""
    - name: EXTRA_ARGS
      description: Additional arguments passed verbatim to the kaniko executor.
      default: ""
    - name: BUILDER_IMAGE
      description: The image on which builds will run
      # custom build while pending PRs awslabs/amazon-ecr-credential-helper#253, GoogleContainerTools/kaniko#1515 and #1543
      default: docker.io/theofpa/executor@sha256:e968634a9c63f3a16c8ef331749115cb6cbcde4d1726c28e3e0b4e5eb79aa4aa
  workspaces:
    - name: source
    - name: aws-credentials
      mountPath: /tekton/home/.aws/
    - name: docker-config
      mountPath: /tekton/home/.docker/
  results:
    - name: IMAGE-DIGEST
      description: Digest of the image just built.

  steps:
    - name: build-and-push
      workingDir: $(workspaces.source.path)
      image: $(params.BUILDER_IMAGE)
      # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential
      # https://github.com/tektoncd/pipeline/pull/706
      env:
        - name: DOCKER_CONFIG
          value: /tekton/home/.docker
      command:
        - /kaniko/executor
        # NOTE(review): the whole param is passed as a single argv element, so
        # an empty EXTRA_ARGS yields an empty-string argument and multiple
        # flags cannot be split — consider an array param; verify upstream.
        - $(params.EXTRA_ARGS)
        - --dockerfile=$(params.DOCKERFILE)
        - --context=$(params.CONTEXT) # The user does not need to care the workspace and the source.
        - --context-sub-path=$(params.SUBPATH)
        - --destination=$(params.IMAGE)
        - --oci-layout-path=$(workspaces.source.path)/image-digest
      # kaniko assumes it is running as root, which means this example fails on platforms
      # that default to run containers as random uid (like OpenShift). Adding this securityContext
      # makes it explicit that it needs to run as root.
      securityContext:
        runAsUser: 0
    - name: write-digest
      workingDir: $(workspaces.source.path)
      image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/imagedigestexporter:v0.16.2
      # output of imagedigestexport [{"key":"digest","value":"sha256:eed29..660","resourceRef":{"name":"myrepo/myimage"}}]
      command: ["/ko-app/imagedigestexporter"]
      args:
        - -images=[{"name":"$(params.IMAGE)","type":"image","url":"$(params.IMAGE)","digest":"","OutputImageDir":"$(workspaces.source.path)/image-digest"}]
        - -terminationMessagePath=image-digested
      securityContext:
        runAsUser: 0
    - name: digest-to-results
      workingDir: $(workspaces.source.path)
      image: docker.io/stedolan/jq@sha256:a61ed0bca213081b64be94c5e1b402ea58bc549f457c2682a86704dd55231e09
      script: |
        # Extract the digest emitted by imagedigestexporter and publish it
        # as the Task result IMAGE-DIGEST.
        cat image-digested | jq '.[0].value' -rj | tee /tekton/results/IMAGE-DIGEST
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
# Sample Secret consumed by the eks-cluster-create/teardown and kaniko Tasks;
# mounted at ~/.aws so the AWS SDK/eksctl find these files by default.
# Replace the $(...) placeholders with real values before applying —
# do not commit real keys to version control.
apiVersion: v1
kind: Secret
metadata:
  name: aws-credentials
type: Opaque
stringData:
  # Becomes ~/.aws/credentials inside the Step containers.
  credentials: |-
    [default]
    aws_access_key_id = $(aws_access_key_id)
    aws_secret_access_key = $(aws_secret_access_key)
  # Becomes ~/.aws/config inside the Step containers.
  config: |-
    [default]
    region = us-west-2

0 comments on commit fc5a44d

Please sign in to comment.