From e6d9d74599562c5329918f342dc299543bfa7eff Mon Sep 17 00:00:00 2001
From: gabriel-farache
Date: Thu, 31 Oct 2024 15:38:47 +0100
Subject: [PATCH] Add CI for all workflow charts

Signed-off-by: gabriel-farache
---
 .github/workflows/create-ocp-project.yaml  | 109 ++++++++
 .github/workflows/extendable-workflow.yaml |  69 +++++
 .github/workflows/greeting.yaml            |  69 +++++
 .github/workflows/modify-vm-resources.yaml | 109 ++++++++
 .github/workflows/move2kube-e2e.yaml       |  68 +++++
 .github/workflows/mta-v7.x-e2e.yaml        |  66 +++++
 .github/workflows/mtv-migration.yaml       |  96 +++++++
 .github/workflows/mtv-plan.yaml            |  96 +++++++
 .github/workflows/request-vm-cnv.yaml      | 109 ++++++++
 .../00-move2kube-instance-route.yaml       |  11 +
 .../templates/00-move2kube-instance.yaml   |  14 +-
 docs/main/move2kube/install_m2k.sh         |  28 ++-
 e2e/move2kube.sh                           | 237 ++++++++++++++++++
 13 files changed, 1063 insertions(+), 18 deletions(-)
 create mode 100644 .github/workflows/create-ocp-project.yaml
 create mode 100644 .github/workflows/extendable-workflow.yaml
 create mode 100644 .github/workflows/greeting.yaml
 create mode 100644 .github/workflows/modify-vm-resources.yaml
 create mode 100644 .github/workflows/move2kube-e2e.yaml
 create mode 100644 .github/workflows/mta-v7.x-e2e.yaml
 create mode 100644 .github/workflows/mtv-migration.yaml
 create mode 100644 .github/workflows/mtv-plan.yaml
 create mode 100644 .github/workflows/request-vm-cnv.yaml
 create mode 100644 charts/move2kube/templates/00-move2kube-instance-route.yaml
 create mode 100755 e2e/move2kube.sh

diff --git a/.github/workflows/create-ocp-project.yaml b/.github/workflows/create-ocp-project.yaml
new file mode 100644
index 00000000..f5fb8f7b
--- /dev/null
+++ b/.github/workflows/create-ocp-project.yaml
@@ -0,0 +1,109 @@
+name: Create OCP project CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/create-ocp-project/**'
+      - .github/workflows/create-ocp-project.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy create-ocp-project serverless workflow and its components
+        run: |
+          cd charts
+          helm install create-ocp-project create-ocp-project
+          WORKFLOW_NAME=create-ocp-project
+          kubectl -n sonataflow-infra patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"
+            },
+            "stringData":{
+              "JIRA_API_TOKEN":"DUMB_TOKEN",
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl -n sonataflow-infra patch sonataflow create-ocp-project --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "JIRA_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "JIRA_USERNAME",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_CONSOLE_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep create-ocp-project-analysis)" --type="NodePort" --port=8080 --name=create-ocp-project-svc
+          kubectl port-forward svc/create-ocp-project-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/create-ocp-project' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/extendable-workflow.yaml b/.github/workflows/extendable-workflow.yaml
new file mode 100644
index 00000000..94c9feb7
--- /dev/null
+++ b/.github/workflows/extendable-workflow.yaml
@@ -0,0 +1,69 @@
+name: Extendable Workflow CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/extendable-workflow/**'
+      - .github/workflows/extendable-workflow.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy extendable-workflow serverless workflow and its components
+        run: |
+          cd charts
+          helm install extendable-workflow extendable-workflow
+          WORKFLOW_NAME=extendable-workflow
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep extendable-workflow-analysis)" --type="NodePort" --port=8080 --name=extendable-workflow-svc
+          kubectl port-forward svc/extendable-workflow-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/extendable-workflow' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/greeting.yaml b/.github/workflows/greeting.yaml
new file mode 100644
index 00000000..243fa83e
--- /dev/null
+++ b/.github/workflows/greeting.yaml
@@ -0,0 +1,69 @@
+name: Greeting CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/greeting/**'
+      - .github/workflows/greeting.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy greeting serverless workflow and its components
+        run: |
+          cd charts
+          helm install greeting greeting
+          WORKFLOW_NAME=greeting
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep greeting-analysis)" --type="NodePort" --port=8080 --name=greeting-svc
+          kubectl port-forward svc/greeting-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/greeting' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/modify-vm-resources.yaml b/.github/workflows/modify-vm-resources.yaml
new file mode 100644
index 00000000..d13243c9
--- /dev/null
+++ b/.github/workflows/modify-vm-resources.yaml
@@ -0,0 +1,109 @@
+name: Modify VM Resources CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/modify-vm-resources/**'
+      - .github/workflows/modify-vm-resources.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy modify-vm-resources serverless workflow and its components
+        run: |
+          cd charts
+          helm install modify-vm-resources modify-vm-resources
+          WORKFLOW_NAME=modify-vm-resources
+          kubectl -n sonataflow-infra patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"
+            },
+            "stringData":{
+              "JIRA_API_TOKEN":"DUMB_TOKEN",
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl -n sonataflow-infra patch sonataflow modify-vm-resources --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "JIRA_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "JIRA_USERNAME",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_CONSOLE_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep modify-vm-resources-analysis)" --type="NodePort" --port=8080 --name=modify-vm-resources-svc
+          kubectl port-forward svc/modify-vm-resources-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/modify-vm-resources' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/move2kube-e2e.yaml b/.github/workflows/move2kube-e2e.yaml
new file mode 100644
index 00000000..36cefece
--- /dev/null
+++ b/.github/workflows/move2kube-e2e.yaml
@@ -0,0 +1,68 @@
+name: Move2kube Workflow end to end tests
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/move2kube/**'
+      - .github/workflows/move2kube-e2e.yaml
+      - e2e/move2kube.sh
+
+jobs:
+  run-m2k-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+      - name: Create sonataflow-infra namespace
+        run: |
+          # The Knative Eventing resources need to be in this namespace as the broker url is hard-coded at the moment
+          kubectl create ns sonataflow-infra
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+          kubectl set env deployment/orchestrator-backstage LOG_LEVEL=DEBUG
+
+      - name: Deploy Move2kube serverless workflow and its components
+        env:
+          SSH_PUB_KEY: ${{secrets.SSH_PUB_KEY}}
+          SSH_PRIV_KEY: ${{secrets.SSH_PRIV_KEY}}
+        run: |
+          echo "${SSH_PUB_KEY}" >> id_rsa.pub
+          echo "${SSH_PRIV_KEY}" >> id_rsa
+          # we are not on OCP but on k8s, the route does not exist
+          rm -rf charts/move2kube/templates/00-move2kube-instance-route.yaml
+          PRIV_ID_RSA_PATH=id_rsa PUB_ID_RSA_PATH=id_rsa.pub M2K_HELM_REPO=charts/move2kube/ ./docs/main/move2kube/install_m2k.sh
+
+      - name: Run e2e script
+        run: |
+          export BACKEND_SECRET=$(kubectl get secret orchestrator-auth -o jsonpath={.data.backend-secret} | base64 -d)
+          e2e/move2kube.sh
+
+      - name: Export kind Logs
+        if: always()
+        run: |
+          kubectl get pods
+          kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mta-v7.x-e2e.yaml b/.github/workflows/mta-v7.x-e2e.yaml
new file mode 100644
index 00000000..72d57ee2
--- /dev/null
+++ b/.github/workflows/mta-v7.x-e2e.yaml
@@ -0,0 +1,66 @@
+name: MTA v7.x Workflow end to end tests
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mta-v7.x/**'
+      - .github/workflows/mta-v7.x-e2e.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy MTA serverless workflow and its components
+        run: |
+          MTA_HELM_REPO=charts/mta-v7.x/ sh docs/main/mta-v7.x/install-mta-v7.sh
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep mta-analysis)" --type="NodePort" --port=8080 --name=mta-svc
+          kubectl port-forward svc/mta-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mta-analysis-v7' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mtv-migration.yaml b/.github/workflows/mtv-migration.yaml
new file mode 100644
index 00000000..89f43693
--- /dev/null
+++ b/.github/workflows/mtv-migration.yaml
@@ -0,0 +1,96 @@
+name: MTV Migration CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mtv-migration/**'
+      - .github/workflows/mtv-migration.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy mtv-migration serverless workflow and its components
+        run: |
+          cd charts
+          helm install mtv-migration mtv-migration
+          WORKFLOW_NAME=mtv-migration
+          kubectl -n sonataflow-infra patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"
+            },
+            "stringData":{
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl -n sonataflow-infra patch sonataflow mtv-migration --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep mtv-migration-analysis)" --type="NodePort" --port=8080 --name=mtv-migration-svc
+          kubectl port-forward svc/mtv-migration-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mtv-migration' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/mtv-plan.yaml b/.github/workflows/mtv-plan.yaml
new file mode 100644
index 00000000..0a53cade
--- /dev/null
+++ b/.github/workflows/mtv-plan.yaml
@@ -0,0 +1,96 @@
+name: MTV Plan CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/mtv-plan/**'
+      - .github/workflows/mtv-plan.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy mtv-plan serverless workflow and its components
+        run: |
+          cd charts
+          helm install mtv-plan mtv-plan
+          WORKFLOW_NAME=mtv-plan
+          kubectl -n sonataflow-infra patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"
+            },
+            "stringData":{
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl -n sonataflow-infra patch sonataflow mtv-plan --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep mtv-plan-analysis)" --type="NodePort" --port=8080 --name=mtv-plan-svc
+          kubectl port-forward svc/mtv-plan-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/mtv-plan' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/.github/workflows/request-vm-cnv.yaml b/.github/workflows/request-vm-cnv.yaml
new file mode 100644
index 00000000..c9355a7f
--- /dev/null
+++ b/.github/workflows/request-vm-cnv.yaml
@@ -0,0 +1,109 @@
+name: Request VM CNV CI
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - 'charts/request-vm-cnv/**'
+      - .github/workflows/request-vm-cnv.yaml
+
+jobs:
+  run-e2e:
+    runs-on: ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          cluster_name: kind
+
+      - name: Install Operators Support
+        run: |
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/crds.yaml
+          # give the apiserver time
+          sleep 5s
+          kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/master/deploy/upstream/quickstart/olm.yaml
+
+      - name: Deploy sonataflow-operator
+        run: |
+          helm repo add orchestrator https://parodos-dev.github.io/orchestrator-helm-chart
+          helm install orchestrator orchestrator/orchestrator-k8s
+
+          kubectl get sfp -A
+          kubectl wait --for=condition=Ready=true pods -l "app.kubernetes.io/name=backstage" --timeout=10m
+          kubectl get pods -o wide
+          kubectl wait --for=condition=Ready=true pods -l "app=sonataflow-platform" --timeout=180s
+
+      - name: Deploy request-vm-cnv serverless workflow and its components
+        run: |
+          cd charts
+          helm install request-vm-cnv request-vm-cnv
+          WORKFLOW_NAME=request-vm-cnv
+          kubectl -n sonataflow-infra patch secret "${WORKFLOW_NAME}-creds" --type merge -p '{
+            "data":{
+              "NOTIFICATIONS_BEARER_TOKEN":"'$(kubectl get secrets -n rhdh-operator backstage-backend-auth-secret -o go-template='{{ .data.BACKEND_SECRET }}')'"
+            },
+            "stringData":{
+              "JIRA_API_TOKEN":"DUMB_TOKEN",
+              "OCP_API_SERVER_TOKEN":"DUMB_TOKEN"
+            }
+          }'
+          kubectl -n sonataflow-infra patch sonataflow request-vm-cnv --type merge -p '{
+            "spec": {
+              "podTemplate": {
+                "container": {
+                  "env": [
+                    {
+                      "name": "BACKSTAGE_NOTIFICATIONS_URL",
+                      "value": "http://backstage-backstage.rhdh-operator"
+                    },
+                    {
+                      "name": "JIRA_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "JIRA_USERNAME",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_API_SERVER_URL",
+                      "value": "DUMB_VALUE"
+                    },
+                    {
+                      "name": "OCP_CONSOLE_URL",
+                      "value": "DUMB_VALUE"
+                    }
+                  ]
+                }
+              }
+            }
+          }
+          '
+          kubectl wait --for=condition=Ready=true pods -l app="${WORKFLOW_NAME}" --timeout=1m
+
+      - uses: actions/checkout@v4
+      - name: Test workflow is responsive
+        run: |
+          kubectl expose "$(kubectl get pod -o name | grep request-vm-cnv-analysis)" --type="NodePort" --port=8080 --name=request-vm-cnv-svc
+          kubectl port-forward svc/request-vm-cnv-svc 8080:8080 &
+          status_code=$(curl -s -o /dev/null -w '%{http_code}' -XGET --location 'http://localhost:8080/request-vm-cnv' --header 'Accept: application/json, text/plain, */*' --header 'Content-Type: application/json')
+          if [ "$status_code" -ne 200 ]; then
+            echo "$status_code"
+            exit 1
+          fi
+
+      - name: Export kind Logs
+        if: always()
+        run: kind export logs ./kind_logs
+
+      - name: Upload Kind Logs
+        uses: actions/upload-artifact@v4
+        # Always run this, even if one of the previous steps failed.
+        if: always()
+        with:
+          name: kind-logs
+          path: ./kind_logs/
+
diff --git a/charts/move2kube/templates/00-move2kube-instance-route.yaml b/charts/move2kube/templates/00-move2kube-instance-route.yaml
new file mode 100644
index 00000000..0c73bbf3
--- /dev/null
+++ b/charts/move2kube/templates/00-move2kube-instance-route.yaml
@@ -0,0 +1,11 @@
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+  name: {{ .Values.instance.name }}-route
+  namespace: {{ .Values.instance.namespace }}
+spec:
+  tls:
+    termination: edge
+  to:
+    kind: Service
+    name: {{ .Values.instance.name }}-svc
diff --git a/charts/move2kube/templates/00-move2kube-instance.yaml b/charts/move2kube/templates/00-move2kube-instance.yaml
index 30af1793..b6220bb1 100644
--- a/charts/move2kube/templates/00-move2kube-instance.yaml
+++ b/charts/move2kube/templates/00-move2kube-instance.yaml
@@ -63,16 +63,4 @@ spec:
   - port: 8080
     protocol: TCP
   selector:
-    app: {{ .Values.instance.name }}
----
-apiVersion: route.openshift.io/v1
-kind: Route
-metadata:
-  name: {{ .Values.instance.name }}-route
-  namespace: {{ .Values.instance.namespace }}
-spec:
-  tls:
-    termination: edge
-  to:
-    kind: Service
-    name: {{ .Values.instance.name }}-svc
+    app: {{ .Values.instance.name }}
\ No newline at end of file
diff --git a/docs/main/move2kube/install_m2k.sh b/docs/main/move2kube/install_m2k.sh
index b49e6678..6738fdf5 100755
--- a/docs/main/move2kube/install_m2k.sh
+++ b/docs/main/move2kube/install_m2k.sh
@@ -1,6 +1,11 @@
 #!/bin/bash
 
-CLUSTER_CLIENT=$(which "${CLUSTER_CLIENT}" >/dev/null 2>&1 && echo oc || echo kubectl)
+CLUSTER_CLIENT=$(which oc >/dev/null 2>&1 && echo oc || echo kubectl)
+if [[ "${CLUSTER_CLIENT}" == "oc" ]]; then
+  echo "Deploying on OCP cluster"
+else
+  echo "Deploying on k8s cluster"
+fi
 
 if [[ -z "${PRIV_ID_RSA_PATH}" ]]; then
   echo 'PRIV_ID_RSA_PATH env variable must be set to the path of the private id_rsa file to use. I.e: ${HOME}/.ssh/id_rsa'
@@ -21,6 +26,9 @@ fi
 TARGET_NS=sonataflow-infra
 M2K_INSTANCE_NS=move2kube
 WORKFLOW_NAME=m2k
+
+"${CLUSTER_CLIENT}" create ns "${TARGET_NS}" 2> /dev/null > /dev/null
+
 "${CLUSTER_CLIENT}" patch configmap/config-features \
   -n knative-serving \
   --type merge \
@@ -29,14 +37,24 @@ WORKFLOW_NAME=m2k
 helm install move2kube ${M2K_HELM_REPO} -n ${TARGET_NS} --set instance.namespace=${M2K_INSTANCE_NS}
 if [ $? -ne 0 ]; then
   echo "move2kube chart already installed, run helm delete move2kube -n ${TARGET_NS} to remove it"
-  exit -1
+  exit 1
+fi
+
+if [[ "${CLUSTER_CLIENT}" == "oc" ]]; then
+  "${CLUSTER_CLIENT}" -n ${TARGET_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${TARGET_NS} get deployments m2k-save-transformation-func-v1-deployment -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
+  "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get deployments move2kube -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
 fi
-"${CLUSTER_CLIENT}" -n ${TARGET_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${TARGET_NS} get deployments m2k-save-transformation-func-v1-deployment -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
-"${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} adm policy add-scc-to-user $("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get deployments move2kube -oyaml | "${CLUSTER_CLIENT}" adm policy scc-subject-review --no-headers -o yaml --filename - | yq -r .status.allowedBy.name) -z default
+
 "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} create secret generic sshkeys --from-file=id_rsa=${PRIV_ID_RSA_PATH} --from-file=id_rsa.pub=${PUB_ID_RSA_PATH}
 "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} scale deploy move2kube --replicas=0 && "${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} scale deploy move2kube --replicas=1
 kubectl -n ${M2K_INSTANCE_NS} wait --for=condition=Ready=true --timeout=2m pod -l app=move2kube-instance
-M2K_ROUTE=$("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get routes move2kube-route -o yaml | yq -r .spec.host)
+
+if [[ "${CLUSTER_CLIENT}" == "oc" ]]; then
+  M2K_ROUTE=$("${CLUSTER_CLIENT}" -n ${M2K_INSTANCE_NS} get routes move2kube-route -o yaml | yq -r .spec.host)
+else
+  M2K_ROUTE="move2kube-svc.${M2K_INSTANCE_NS}"
+fi
+
 "${CLUSTER_CLIENT}" -n ${TARGET_NS} delete ksvc m2k-save-transformation-func
 helm upgrade move2kube ${M2K_HELM_REPO} -n ${TARGET_NS} --set workflow.move2kubeURL=https://${M2K_ROUTE}
diff --git a/e2e/move2kube.sh b/e2e/move2kube.sh
new file mode 100755
index 00000000..caec1279
--- /dev/null
+++ b/e2e/move2kube.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+
+set -x
+set -e
+
+# holds the pid of the port forward process for cleanup
+export port_forward_pid=""
+
+function cleanup() {
+  echo "cleanup $?"
+  kill "$port_forward_pid" || true
+  kill "$move2kube_port_forward_pid" || true
+}
+
+function getAllNotifications() {
+  GUEST_TOKEN=$(curl $BACKSTAGE_URL/api/auth/guest/refresh | jq -r .backstageIdentity.token)
+  curl -s -H "Authorization: Bearer ${GUEST_TOKEN}" "${BACKSTAGE_NOTIFICATION_URL}" | jq ".notifications"
+}
+
+trap 'cleanup' EXIT SIGTERM
+
+echo "Proxy Janus-idp port ⏳"
+kubectl port-forward "$(kubectl get svc -l app.kubernetes.io/component=backstage -o name)" 9080:7007 &
+port_forward_pid="$!"
+sleep 3
+echo "Proxy Janus-idp port ✅"
+
+echo "Proxy move2kube instance port ⏳"
+kubectl port-forward svc/move2kube-instance-svc 8080:8080 &
+move2kube_port_forward_pid="$!"
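+# give the background port-forward a moment to establish before the first request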
+sleep 3
+echo "Proxy move2kube instance port ✅"
+
+
+echo "End to end tests start ⏳"
+MOVE2KUBE_URL="http://localhost:8080"
+BACKSTAGE_URL="http://localhost:9080"
+BACKSTAGE_NOTIFICATION_URL="${BACKSTAGE_URL}/api/notifications/"
+GIT_ORG="gfarache31/m2k-test"
+GIT_REPO="bitbucket.org/${GIT_ORG}"
+GIT_SOURCE_BRANCH="master"
+GIT_TARGET_BRANCH="e2e-test-$(date +%s)"
+echo "Creating workspace and project in move2kube instance"
+WORKSPACE_ID=$(curl -X POST "${MOVE2KUBE_URL}/api/v1/workspaces" -H 'Content-Type: application/json' --data '{"name": "e2e Workspace", "description": "e2e tests"}' | jq -r .id)
+PROJECT_ID=$(curl -X POST "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects" -H 'Content-Type: application/json' --data '{"name": "e2e Project", "description": "e2e tests"}' | jq -r .id)
+
+echo "Wait until M2K workflow is available in backstage..."
+M2K_STATUS=$(curl -XGET -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${BACKEND_SECRET}" ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/overview)
+until [ "$M2K_STATUS" -eq 200 ]
+do
+  sleep 5
+  M2K_STATUS=$(curl -XGET -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer ${BACKEND_SECRET}" ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/overview)
+done
+
+echo "M2K is available in backstage, sending execution request"
+out=$(curl -XPOST -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" \
+  ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/execute \
+  -d "{\"inputData\": {\"repositoryURL\": \"ssh://${GIT_REPO}\", \"recipients\": [\"user:default/guest\"], \"sourceBranch\": \"${GIT_SOURCE_BRANCH}\", \"targetBranch\": \"${GIT_TARGET_BRANCH}\", \"workspaceId\": \"${WORKSPACE_ID}\", \"projectId\": \"${PROJECT_ID}\"}}")
+ID=$(echo "$out" | jq -r -e .id)
+
+echo "Workflow ID: ${ID}"
+
+if [ -z "$ID" ] || [ "$ID" == "null" ]; then
+  echo "workflow instance id is null... exiting "
+  exit 1
+fi
+
+
+echo "Wait until plan exists"
+retries=20
+http_status=$(curl -X GET -s -o /dev/null -w "%{http_code}" "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/plan")
+while [ ${retries} -ne 0 ] && [ "${http_status}" -eq 404 ]; do
+  echo "Wait until plan exists"
+  sleep 5
+  retries=$((retries-1))
+  http_status=$(curl -X GET -s -o /dev/null -w "%{http_code}" "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/plan")
+done
+
+if [ "${http_status}" -eq 204 ]
+then
+  echo "Plan not created, error when creating it, check move2kube logs, http status=${http_status}...exiting "
+  exit 1
+fi
+
+if [ "${http_status}" -eq 404 ]
+then
+  echo "Plan not created, http status=${http_status}...exiting "
+  exit 1
+fi
+
+
+GUEST_TOKEN=$(curl $BACKSTAGE_URL/api/auth/guest/refresh | jq -r .backstageIdentity.token)
+
+echo "Checking if Q&A waiting notification with move2kube URL received"
+retries=20
+while test ${retries} -ne 0 && getAllNotifications | jq -e '.|length == 0' ; do
+  echo "Wait until a message arrives"
+  sleep 5
+  retries=$((retries-1))
+done
+
+ALL_NOTIFICATION=$(getAllNotifications)
+printf "All notifications\n%s\n" "$ALL_NOTIFICATION"
+if printf "%s" "$ALL_NOTIFICATION" | jq -e '.|length == 0'
+then
+  printf "No notification found. The full reply is %s\n\nexiting " "${ALL_NOTIFICATION}"
+  exit 1
+fi
+
+NOTIFICATION=$(printf "%s" "$ALL_NOTIFICATION" | jq '.[0]')
+if printf "%s" "${NOTIFICATION}" | jq ".payload.link | select(contains(\"${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs\"))"
+then
+  printf "Notification has payload link with matching URL: %s\n\n" "${NOTIFICATION}"
+else
+  printf "Notification has no payload link with matching URL: %s\n\nexiting " "${NOTIFICATION}"
+  exit 1
+fi
+
+echo "Checking if Knative function running"
+nb_pods=$(kubectl get pods -l app=m2k-save-transformation-func-v1 --no-headers | wc -l)
+retries=20
+while [[ ${retries} -ne 0 && ${nb_pods} -eq 0 ]]; do
+  echo "Wait until Knative function running"
+  sleep 5
+  retries=$((retries-1))
+  nb_pods=$(kubectl get pods -l app=m2k-save-transformation-func-v1 --no-headers | wc -l)
+done
+
+if [[ $nb_pods -ne 1 ]]
+then
+  echo "Knative function not running...exiting "
+  exit 1
+fi
+
+echo "Answering Q&A to continue workflow"
+TRANSFORMATION_ID=$(curl "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}" | jq -r '.outputs | keys'[0])
+current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current")
+question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g')
+default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' ')
+while [ "${question_id}" != "" ]; do
+  curl -iX POST "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current/solution" \
+    -H 'Content-Type: application/json' \
+    -d "{\"solution\": \"{\\\"id\\\":\\\"${question_id}\\\",\\\"answer\\\":${default_answer}}\"}"
+  current_question=$(curl -X GET "${MOVE2KUBE_URL}/api/v1/workspaces/${WORKSPACE_ID}/projects/${PROJECT_ID}/outputs/${TRANSFORMATION_ID}/problems/current")
+  question_id=$(echo "${current_question}" | jq -r '.question | fromjson | .id' | sed -r -e 's/"/\\\\\\\"/g')
+  default_answer=$(echo "${current_question}" | jq '.question | fromjson | .default' | sed -r -e 's/"/\\"/g' | tr '\n' ' ')
+done
+
+echo "Checking if workflow completed successfully"
+
+curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}"
+
+state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+retries=20
+while [[ ${retries} -ne 0 && "$state" != "COMPLETED" ]]; do
+  sleep 5
+  retries=$((retries-1))
+  curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}"
+  state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+done
+
+if [ "$state" != "COMPLETED" ]; then
+  echo "workflow instance state is '${state}', should be 'COMPLETED'... exiting "
+  exit 1
+fi
+
+echo "Checking if branch ${GIT_TARGET_BRANCH} created on git repo ${GIT_REPO}"
+
+http_status=$(curl -X GET -L -s -o /dev/null -w "%{http_code}" "https://api.bitbucket.org/2.0/repositories/${GIT_ORG}/refs/branches/${GIT_TARGET_BRANCH}")
+retries=20
+while [[ ${retries} -ne 0 && ${http_status} -eq 404 ]]; do
+  sleep 5
+  retries=$((retries-1))
+  http_status=$(curl -X GET -L -s -o /dev/null -w "%{http_code}" "https://api.bitbucket.org/2.0/repositories/${GIT_ORG}/refs/branches/${GIT_TARGET_BRANCH}")
+done
+if [ "${http_status}" -eq 404 ]
+then
+  echo "Branch ${GIT_TARGET_BRANCH} not created on repo ${GIT_REPO}...exiting "
+  exit 1
+else
+  echo "Branch ${GIT_TARGET_BRANCH} successfully created on repo ${GIT_REPO}! "
+fi
+
+echo "Checking if completion notification received"
+retries=20
+while test ${retries} -ne 0 && getAllNotifications | jq -e '.|length == 1' ; do
+  echo "Wait until a message arrives, expecting 2 messages overall"
+  sleep 5
+  retries=$((retries-1))
+done
+
+ALL_NOTIFICATION=$(getAllNotifications)
+printf "All notifications\n%s\n" "$ALL_NOTIFICATION"
+
+if printf "%s" "$ALL_NOTIFICATION" | jq -e '.|length == 1'
+then
+  printf "No notification with result found - expecting success or failure notification. The full reply is %s\n\nexiting " "${ALL_NOTIFICATION}"
+  exit 1
+fi
+
+NOTIFICATION=$(printf "%s" "$ALL_NOTIFICATION" | jq '.[0]')
+if printf "%s" "$NOTIFICATION" | jq -e '.payload| (.severity != "high" and .severity != "critical" )'
+then
+  printf "Notification has NO result with high or critical severity in it: %s\n\n" "${NOTIFICATION}"
+else
+  printf "Notification has a result with high or critical severity in it: %s\n\nexiting " "${NOTIFICATION}"
+  exit 1
+fi
+
+
+echo "Checking that with wrong input parameters, the workflow ends in error"
+out=$(curl -XPOST -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" \
+  ${BACKSTAGE_URL}/api/orchestrator/v2/workflows/m2k/execute \
+  -d "{\"inputData\": {\"repositoryURL\": \"ssh://${GIT_REPO}_WRONG\", \"recipients\": [\"user:default/guest\"], \"sourceBranch\": \"${GIT_SOURCE_BRANCH}\", \"targetBranch\": \"${GIT_TARGET_BRANCH}\", \"workspaceId\": \"${WORKSPACE_ID}\", \"projectId\": \"${PROJECT_ID}\"}}")
+ID=$(echo "$out" | jq -r -e .id)
+
+echo "Workflow ID: ${ID}"
+
+if [ -z "$ID" ] || [ "$ID" == "null" ]; then
+  echo "workflow instance id is null... exiting "
+  exit 1
+fi
+
+state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+retries=20
+while [[ ${retries} -ne 0 && "$state" != "ERROR" ]]; do
+  sleep 5
+  retries=$((retries-1))
+  state=$(curl -H "Content-Type: application/json" -H "Authorization: Bearer ${BACKEND_SECRET}" "${BACKSTAGE_URL}/api/orchestrator/v2/workflows/instances/${ID}" | jq -r .instance.state)
+done
+
+if [ "$state" != "ERROR" ]; then
+  echo "workflow instance state is '${state}', should be 'ERROR'... exiting "
+  exit 1
+fi
+
+echo "End to end tests passed ✅"
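
Reviewer note: each "Test workflow is responsive" step above follows the same expose/port-forward/curl pattern, so it can be reproduced locally for debugging. Below is a minimal sketch, assuming a kind cluster where the greeting chart from this patch is already installed and its workflow pod is Ready; the service name and URL mirror the CI steps, while the trap/pid handling is hypothetical glue added for local use:

    #!/bin/bash
    # Minimal local sketch of the CI responsiveness probe (assumes the
    # greeting chart is installed and its workflow pod is Ready).
    set -euo pipefail

    WORKFLOW_NAME=greeting

    # Expose the workflow pod and tunnel to it, mirroring the CI steps above.
    kubectl expose "$(kubectl get pod -o name | grep "${WORKFLOW_NAME}")" \
      --type="NodePort" --port=8080 --name="${WORKFLOW_NAME}-svc"
    kubectl port-forward "svc/${WORKFLOW_NAME}-svc" 8080:8080 &
    pf_pid="$!"
    trap 'kill "${pf_pid}" || true' EXIT
    sleep 3

    # A GET on the workflow's base path should return HTTP 200 once the
    # runtime is up; anything else is a failure, as in the workflows above.
    status_code=$(curl -s -o /dev/null -w '%{http_code}' \
      --header 'Accept: application/json, text/plain, */*' \
      "http://localhost:8080/${WORKFLOW_NAME}")
    echo "HTTP ${status_code}"
    [ "${status_code}" -eq 200 ]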