workflows: refactor EKS workflows
The merge of #400 broke #391.

This PR fixes the issue by refactoring both EKS workflows to be
consistent with the other workflows: now that we create the cluster
with nodes available right away, just like on other platforms, we no
longer need to split the `install`/`test` steps.

Fixes 3d2fbdb

Signed-off-by: Nicolas Busseneau <[email protected]>
nbusseneau committed Jul 19, 2021
1 parent 3d2fbdb commit a27c290
Showing 7 changed files with 33 additions and 64 deletions.
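
Taken together, the refactor collapses the former install job and test job into a single in-cluster script per workflow. A condensed sketch of the merged flow (not verbatim; see the eks.sh diff below for the exact commands):

#!/bin/bash
set -x
set -e

# Install Cilium without waiting, then enable Relay and wait for readiness
cilium install --cluster-name "${CLUSTER_NAME}" --wait=false --config monitor-aggregation=none
cilium hubble enable
cilium status --wait

# Assert the AWS VPC CNI is present but inert, then start port-forwarding Relay
[[ $(kubectl -n kube-system get ds/aws-node -o jsonpath='{.status.currentNumberScheduled}') == 0 ]]
cilium hubble port-forward&
sleep 10s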
10 changes: 0 additions & 10 deletions .github/in-cluster-test-scripts/eks-install.sh

This file was deleted.

12 changes: 0 additions & 12 deletions .github/in-cluster-test-scripts/eks-tunnel-install.sh

This file was deleted.

11 changes: 11 additions & 0 deletions .github/in-cluster-test-scripts/eks-tunnel.sh
@@ -3,12 +3,23 @@
 set -x
 set -e
 
+# Install Cilium
+cilium install \
+  --cluster-name "${CLUSTER_NAME}" \
+  --wait=false \
+  --config monitor-aggregation=none \
+  --datapath-mode=tunnel \
+  --ipam cluster-pool
+
 # Enable Relay
 cilium hubble enable
 
 # Wait for Cilium status to be ready
 cilium status --wait
 
+# Make sure the 'aws-node' DaemonSet exists but has no scheduled pods
+[[ $(kubectl -n kube-system get ds/aws-node -o jsonpath='{.status.currentNumberScheduled}') == 0 ]]
+
 # Port forward Relay
 cilium hubble port-forward&
 sleep 10s
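
The one-line assertion added above reads `currentNumberScheduled` from the DaemonSet status. A more verbose equivalent, as a sketch (assuming kubectl points at the test cluster):

# The CLI leaves the aws-node DaemonSet in place but blocks it from
# scheduling pods via a nodeSelector, so any count other than 0 means the
# AWS VPC CNI is still running alongside Cilium.
scheduled=$(kubectl -n kube-system get ds/aws-node \
  -o jsonpath='{.status.currentNumberScheduled}')
if [[ "${scheduled}" != "0" ]]; then
  echo "aws-node still has ${scheduled} scheduled pod(s)" >&2
  exit 1
fi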
3 changes: 3 additions & 0 deletions .github/in-cluster-test-scripts/eks-uninstall.sh
@@ -5,3 +5,6 @@ set -e
 
 # Uninstall Cilium
 cilium uninstall --wait
+
+# Make sure the 'aws-node' DaemonSet blocking nodeSelector was removed
+[[ ! $(kubectl -n kube-system get ds/aws-node -o jsonpath="{.spec.template.spec.nodeSelector['io\.cilium/aws-node-enabled']}") ]]
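
The escaped jsonpath key targets the nodeSelector entry that `cilium install` adds to aws-node. To inspect it manually, something like this should work (illustrative; assumes kubectl access):

# While Cilium is installed, the selector map should contain the blocking key:
kubectl -n kube-system get ds/aws-node \
  -o jsonpath="{.spec.template.spec.nodeSelector}"
# Dots in a key must be escaped inside jsonpath brackets, hence
# ['io\.cilium/aws-node-enabled'] in the check above. After `cilium uninstall`
# the key must be absent, which is exactly what the new assertion verifies.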
9 changes: 9 additions & 0 deletions .github/in-cluster-test-scripts/eks.sh
@@ -3,12 +3,21 @@
 set -x
 set -e
 
+# Install Cilium
+cilium install \
+  --cluster-name "${CLUSTER_NAME}" \
+  --wait=false \
+  --config monitor-aggregation=none
+
 # Enable Relay
 cilium hubble enable
 
 # Wait for Cilium status to be ready
 cilium status --wait
 
+# Make sure the 'aws-node' DaemonSet exists but has no scheduled pods
+[[ $(kubectl -n kube-system get ds/aws-node -o jsonpath='{.status.currentNumberScheduled}') == 0 ]]
+
 # Port forward Relay
 cilium hubble port-forward&
 sleep 10s
26 changes: 5 additions & 21 deletions .github/workflows/eks-tunnel.yaml
@@ -55,7 +55,7 @@ jobs:
           echo ::set-output name=sha::${SHA}
           echo ::set-output name=owner::${OWNER}
 
-      - name: Create EKS cluster with nodegroup
+      - name: Create EKS cluster
         run: |
           cat <<EOF > eks-config.yaml
           apiVersion: eksctl.io/v1alpha5
@@ -91,23 +91,6 @@ jobs:
           .github/get-kubeconfig.sh
           kubectl create configmap cilium-cli-kubeconfig -n kube-system --from-file kubeconfig
 
-      - name: Load cilium install script in configmap
-        run: |
-          kubectl create configmap cilium-cli-test-script-install -n kube-system --from-file=in-cluster-test-script.sh=.github/in-cluster-test-scripts/eks-tunnel-install.sh
-
-      - name: Create cilium-cli install job
-        run: |
-          helm install .github/cilium-cli-test-job-chart \
-            --generate-name \
-            --set tag=${{ steps.vars.outputs.sha }} \
-            --set cluster_name=${{ env.clusterName }} \
-            --set job_name=cilium-cli-install \
-            --set test_script_cm=cilium-cli-test-script-install
-
-      - name: Make sure the 'aws-node' DaemonSet exists but has no scheduled pods
-        run: |
-          [[ $(kubectl -n kube-system get ds/aws-node -o jsonpath='{.status.currentNumberScheduled}') == 0 ]]
-
       - name: Load cilium cli script in configmap
         run: |
           kubectl create configmap cilium-cli-test-script -n kube-system --from-file=in-cluster-test-script.sh=.github/in-cluster-test-scripts/eks-tunnel.sh
@@ -121,13 +104,12 @@
       - name: Wait for test job
         run: |
-          kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=10m
+          kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=20m
 
       - name: Post-test information gathering
         if: ${{ !success() }}
         run: |
           echo "=== Retrieve in-cluster jobs logs ==="
-          kubectl logs --timestamps -n kube-system job/cilium-cli-install
           kubectl logs --timestamps -n kube-system job/cilium-cli
           echo "\n\n\n=== Install latest stable CLI ==="
@@ -152,7 +134,9 @@
             --set job_name=cilium-cli-uninstall \
             --set test_script_cm=cilium-cli-test-script-uninstall
           kubectl -n kube-system wait job/cilium-cli-uninstall --for=condition=complete --timeout=2m
-          [[ ! $(kubectl -n kube-system get ds/aws-node -o jsonpath="{.spec.template.spec.nodeSelector['io\.cilium/aws-node-enabled']}") ]]
+          echo "=== Retrieve in-cluster jobs logs ==="
+          kubectl logs --timestamps -n kube-system job/cilium-cli-uninstall
+
 
       - name: Clean up EKS
         if: ${{ always() }}
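
The 'Create EKS cluster' step writes eks-config.yaml via the heredoc shown above; per the commit message, the config now provisions the cluster together with its nodegroup so nodes are available right away. A hypothetical shape of that config (nodegroup name, instance type, size, and the region variable are assumptions, not taken from this diff):

cat <<EOF > eks-config.yaml
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: ${CLUSTER_NAME}
  region: ${AWS_REGION}      # hypothetical: region variable name assumed
managedNodeGroups:
  - name: ng-1               # hypothetical nodegroup name
    instanceType: t3.medium  # hypothetical instance type
    desiredCapacity: 2       # hypothetical node count
EOF
eksctl create cluster -f ./eks-config.yaml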
26 changes: 5 additions & 21 deletions .github/workflows/eks.yaml
@@ -55,7 +55,7 @@ jobs:
           echo ::set-output name=sha::${SHA}
           echo ::set-output name=owner::${OWNER}
 
-      - name: Create EKS cluster nodegroup
+      - name: Create EKS cluster
         run: |
           cat <<EOF > eks-config.yaml
           apiVersion: eksctl.io/v1alpha5
@@ -91,23 +91,6 @@ jobs:
           .github/get-kubeconfig.sh
           kubectl create configmap cilium-cli-kubeconfig -n kube-system --from-file kubeconfig
 
-      - name: Load cilium install script in configmap
-        run: |
-          kubectl create configmap cilium-cli-test-script-install -n kube-system --from-file=in-cluster-test-script.sh=.github/in-cluster-test-scripts/eks-install.sh
-
-      - name: Create cilium-cli install job
-        run: |
-          helm install .github/cilium-cli-test-job-chart \
-            --generate-name \
-            --set tag=${{ steps.vars.outputs.sha }} \
-            --set cluster_name=${{ env.clusterName }} \
-            --set job_name=cilium-cli-install \
-            --set test_script_cm=cilium-cli-test-script-install
-
-      - name: Make sure the 'aws-node' DaemonSet exists but has no scheduled pods
-        run: |
-          [[ $(kubectl -n kube-system get ds/aws-node -o jsonpath='{.status.currentNumberScheduled}') == 0 ]]
-
       - name: Load cilium cli script in configmap
         run: |
           kubectl create configmap cilium-cli-test-script -n kube-system --from-file=in-cluster-test-script.sh=.github/in-cluster-test-scripts/eks.sh
@@ -121,13 +104,12 @@
       - name: Wait for test job
         run: |
-          kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=10m
+          kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=20m
 
      - name: Post-test information gathering
         if: ${{ !success() }}
         run: |
           echo "=== Retrieve in-cluster jobs logs ==="
-          kubectl logs --timestamps -n kube-system job/cilium-cli-install
           kubectl logs --timestamps -n kube-system job/cilium-cli
           echo "\n\n\n=== Install latest stable CLI ==="
@@ -152,7 +134,9 @@
             --set job_name=cilium-cli-uninstall \
             --set test_script_cm=cilium-cli-test-script-uninstall
           kubectl -n kube-system wait job/cilium-cli-uninstall --for=condition=complete --timeout=2m
-          [[ ! $(kubectl -n kube-system get ds/aws-node -o jsonpath="{.spec.template.spec.nodeSelector['io\.cilium/aws-node-enabled']}") ]]
+          echo "=== Retrieve in-cluster jobs logs ==="
+          kubectl logs --timestamps -n kube-system job/cilium-cli-uninstall
+
 
       - name: Clean up EKS
         if: ${{ always() }}
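
For reference, the test job that `kubectl wait` blocks on is launched the same way as the uninstall job above, pointing at the cilium-cli-test-script configmap. The exact step is collapsed out of this diff, so the following is a reconstruction, not verbatim:

helm install .github/cilium-cli-test-job-chart \
  --generate-name \
  --set tag=${{ steps.vars.outputs.sha }} \
  --set cluster_name=${{ env.clusterName }} \
  --set job_name=cilium-cli \
  --set test_script_cm=cilium-cli-test-script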
