From b07fbd627194a6655300309429877ae82433ae66 Mon Sep 17 00:00:00 2001
From: Artur Załęski
Date: Wed, 10 Jul 2024 15:58:37 +0200
Subject: [PATCH] Fix waiting for daemonset when desired number of pods is 0 (#756)

Fixes #755

SUMMARY

Because no node has the non_exisiting_label label (see the code below), the desired number of Pods is 0. In that case Kubernetes does not create the .status.updatedNumberScheduled field (observed at least on v1.27), since no Pods are ever going to be created. Therefore, when .status.updatedNumberScheduled does not exist, the waiter should assume the number is 0 instead of waiting until the timeout expires.

Code to reproduce:

- name: Create daemonset
  kubernetes.core.k8s:
    state: present
    wait: true
    definition:
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: my-daemonset
        namespace: default
      spec:
        selector:
          matchLabels:
            app: my-app
        template:
          metadata:
            labels:
              app: my-app
          spec:
            containers:
              - name: my-container
                image: nginx
            nodeSelector:
              non_exisiting_label: 1

ISSUE TYPE

Bugfix Pull Request

COMPONENT NAME

kubernetes.core.plugins.module_utils.k8s.waiter

ADDITIONAL INFORMATION

TASK [Create daemonset] **********************************************************************************************************************************
changed: [controlplane] => {"changed": true, "duration": 5, "method": "create", "result": {"apiVersion": "apps/v1", "kind": "DaemonSet", "metadata": {"annotations": {"deprecated.daemonset.template.generation": "1"}, "creationTimestamp": "2024-06-28T08:23:41Z", "generation": 1, "managedFields": [{"apiVersion": "apps/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:deprecated.daemonset.template.generation": {}}}, "f:spec": {"f:revisionHistoryLimit": {}, "f:selector": {}, "f:template": {"f:metadata": {"f:labels": {".": {}, "f:app": {}}}, "f:spec": {"f:containers": {"k:{\"name\":\"my-container\"}": {".": {}, "f:image": {}, "f:imagePullPolicy": {}, "f:name": {}, "f:resources": {}, "f:terminationMessagePath": {}, "f:terminationMessagePolicy": {}}}, "f:dnsPolicy": {}, "f:nodeSelector": {}, "f:restartPolicy": {}, "f:schedulerName": {}, "f:securityContext": {}, "f:terminationGracePeriodSeconds": {}}}, "f:updateStrategy": {"f:rollingUpdate": {".": {}, "f:maxSurge": {}, "f:maxUnavailable": {}}, "f:type": {}}}}, "manager": "OpenAPI-Generator", "operation": "Update", "time": "2024-06-28T08:23:41Z"}, {"apiVersion": "apps/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:observedGeneration": {}}}, "manager": "kube-controller-manager", "operation": "Update", "subresource": "status", "time": "2024-06-28T08:23:41Z"}], "name": "my-daemonset", "namespace": "default", "resourceVersion": "1088421", "uid": "faafdbf7-4388-4cec-88d5-84657966312d"}, "spec": {"revisionHistoryLimit": 10, "selector": {"matchLabels": {"app": "my-app"}}, "template": {"metadata": {"creationTimestamp": null, "labels": {"app": "my-app"}}, "spec": {"containers": [{"image": "nginx", "imagePullPolicy": "Always", "name": "my-container", "resources": {}, "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File"}], "dnsPolicy": "ClusterFirst", "nodeSelector": {"non_exisiting_label": "1"}, "restartPolicy": "Always", "schedulerName": "default-scheduler", "securityContext": {}, "terminationGracePeriodSeconds": 30}}, "updateStrategy": {"rollingUpdate": {"maxSurge": 0, "maxUnavailable": 1}, "type": "RollingUpdate"}}, "status": {"currentNumberScheduled": 0, "desiredNumberScheduled": 0, "numberMisscheduled": 0, "numberReady": 0, "observedGeneration": 1}}}

~$ kubectl get ds
NAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR           AGE
my-daemonset   0         0         0       0            0           non_exisiting_label=1   30s
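To make the failure mode concrete, here is a minimal Python sketch of the readiness predicate as patched. It is a simplified stand-in for daemonset_ready() in plugins/module_utils/k8s/waiter.py: it takes the status fields as plain arguments rather than a ResourceInstance, on the assumption (matching the task output above) that a field the API server never set reads back as None:

from typing import Optional

def daemonset_ready(updated: Optional[int], desired: int, ready: int,
                    observed_generation: int, generation: int) -> bool:
    # Mirrors the fixed check: a missing updatedNumberScheduled (None) is
    # coerced to 0 before comparing. The old code compared None == 0,
    # which is False, so the waiter spun until wait_timeout expired.
    return bool(
        desired is not None
        and (updated or 0) == desired
        and ready == desired
        and observed_generation == generation
    )

# Status reported for the daemonset above: desiredNumberScheduled is 0 and
# updatedNumberScheduled was never written by the controller.
print(daemonset_ready(updated=None, desired=0, ready=0,
                      observed_generation=1, generation=1))  # True (was False)

With the old comparison the predicate could never become True for an intentionally empty daemonset; the "or 0" coercion makes it succeed immediately.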
Reviewed-by: Mike Graves
---
 .../fragments/756-fix-daemonset-waiting.yaml  |  2 +
 plugins/module_utils/k8s/waiter.py            |  2 +-
 .../targets/k8s_waiter/defaults/main.yml      |  3 ++
 .../targets/k8s_waiter/tasks/main.yml         | 42 +++++++++++++++++++
 4 files changed, 48 insertions(+), 1 deletion(-)
 create mode 100644 changelogs/fragments/756-fix-daemonset-waiting.yaml

diff --git a/changelogs/fragments/756-fix-daemonset-waiting.yaml b/changelogs/fragments/756-fix-daemonset-waiting.yaml
new file mode 100644
index 0000000000..e186e5ef62
--- /dev/null
+++ b/changelogs/fragments/756-fix-daemonset-waiting.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - waiter - Fix waiting for daemonset when desired number of pods is 0. (https://github.com/ansible-collections/kubernetes.core/pull/756).
diff --git a/plugins/module_utils/k8s/waiter.py b/plugins/module_utils/k8s/waiter.py
index 5328d63180..16ee10dda5 100644
--- a/plugins/module_utils/k8s/waiter.py
+++ b/plugins/module_utils/k8s/waiter.py
@@ -51,7 +51,7 @@ def daemonset_ready(daemonset: ResourceInstance) -> bool:
     return bool(
         daemonset.status
         and daemonset.status.desiredNumberScheduled is not None
-        and daemonset.status.updatedNumberScheduled
+        and (daemonset.status.updatedNumberScheduled or 0)
         == daemonset.status.desiredNumberScheduled
         and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled
         and daemonset.status.observedGeneration == daemonset.metadata.generation
diff --git a/tests/integration/targets/k8s_waiter/defaults/main.yml b/tests/integration/targets/k8s_waiter/defaults/main.yml
index 04c873ce36..002c00238c 100644
--- a/tests/integration/targets/k8s_waiter/defaults/main.yml
+++ b/tests/integration/targets/k8s_waiter/defaults/main.yml
@@ -5,6 +5,7 @@ k8s_pod_metadata:
 
 k8s_pod_spec:
   serviceAccount: "{{ k8s_pod_service_account }}"
+  nodeSelector: "{{ k8s_pod_node_selector }}"
   containers:
     - image: "{{ k8s_pod_image }}"
       imagePullPolicy: Always
@@ -33,6 +34,8 @@ k8s_pod_ports: []
 
 k8s_pod_env: []
 
+k8s_pod_node_selector: {}
+
 k8s_pod_template:
   metadata: "{{ k8s_pod_metadata }}"
   spec: "{{ k8s_pod_spec }}"
diff --git a/tests/integration/targets/k8s_waiter/tasks/main.yml b/tests/integration/targets/k8s_waiter/tasks/main.yml
index 1cb7e4c405..3bfb668bb6 100644
--- a/tests/integration/targets/k8s_waiter/tasks/main.yml
+++ b/tests/integration/targets/k8s_waiter/tasks/main.yml
@@ -127,6 +127,48 @@
           - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
           - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
 
+    - name: Create daemonset with nodeSelector and not existing label
+      k8s:
+        definition:
+          apiVersion: apps/v1
+          kind: DaemonSet
+          metadata:
+            name: wait-daemonset-not-existing-label
+            namespace: "{{ wait_namespace }}"
+          spec:
+            selector:
+              matchLabels:
+                app: "{{ k8s_pod_name }}"
+            template: "{{ k8s_pod_template }}"
+        wait: yes
+        wait_sleep: 5
+        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
+      vars:
+        k8s_pod_name: wait-daemonset-not-existing-label
+        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+        k8s_pod_command:
+          - sleep
+          - "600"
+        k8s_pod_node_selector:
+          nonExisitingLabel: test-not-exiting-label
+      register: ds_not_existing_label
+
+    - name: Get updated pods
+      k8s_info:
+        api_version: v1
+        kind: Pod
+        namespace: "{{ wait_namespace }}"
+        label_selectors:
+          - app=wait-daemonset-not-existing-label
+      register: updated_ds_pods_not_existing_label
+
+    - name: Check that daemonset wait worked (when desired number is 0)
+      assert:
+        that:
+          - ds_not_existing_label.result.status.currentNumberScheduled == ds_not_existing_label.result.status.desiredNumberScheduled
+          - ds_not_existing_label.result.status.desiredNumberScheduled == 0
+          - updated_ds_pods_not_existing_label.resources | length == 0
+
     - name: Add a statefulset
       k8s:
         definition: