Skip to content

Commit

Permalink
fix issue when using k8s_drain with disable_eviction set to yes (ansible-collections#418)
Browse files Browse the repository at this point in the history

fix issue when using k8s_drain with disable_eviction set to yes

SUMMARY

fixes ansible-collections#416

ISSUE TYPE


Bugfix Pull Request

COMPONENT NAME

k8s_drain
ADDITIONAL INFORMATION

Reviewed-by: Abhijeet Kasurde <None>
  • Loading branch information
abikouo committed Mar 23, 2022
1 parent d68dec3 commit 074f0a6
Show file tree
Hide file tree
Showing 4 changed files with 78 additions and 17 deletions.
3 changes: 3 additions & 0 deletions changelogs/fragments/417-fix-k8s-drain-delete-options.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
---
bugfixes:
- k8s_drain - fix error occurring when trying to drain node with disable_eviction set to yes (https://github.com/ansible-collections/kubernetes.core/issues/416).
24 changes: 8 additions & 16 deletions plugins/modules/k8s_drain.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@

try:
from kubernetes.client.api import core_v1_api
from kubernetes.client.models import V1DeleteOptions
from kubernetes.client.models import V1DeleteOptions, V1ObjectMeta
from kubernetes.client.exceptions import ApiException
except ImportError:
# ImportError are managed by the common module already.
Expand Down Expand Up @@ -273,15 +273,8 @@ def __init__(self, module):
self._drain_options = module.params.get("delete_options", {})
self._delete_options = None
if self._drain_options.get("terminate_grace_period"):
self._delete_options = {}
self._delete_options.update({"apiVersion": "v1"})
self._delete_options.update({"kind": "DeleteOptions"})
self._delete_options.update(
{
"gracePeriodSeconds": self._drain_options.get(
"terminate_grace_period"
)
}
self._delete_options = V1DeleteOptions(
grace_period_seconds=self._drain_options.get("terminate_grace_period")
)

self._changed = False
Expand Down Expand Up @@ -318,17 +311,16 @@ def _elapsed_time():

def evict_pods(self, pods):
for namespace, name in pods:
definition = {"metadata": {"name": name, "namespace": namespace}}
if self._delete_options:
definition.update({"delete_options": self._delete_options})
try:
if self._drain_options.get("disable_eviction"):
body = V1DeleteOptions(**definition)
self._api_instance.delete_namespaced_pod(
name=name, namespace=namespace, body=body
name=name, namespace=namespace, body=self._delete_options
)
else:
body = v1_eviction(**definition)
body = v1_eviction(
delete_options=self._delete_options,
metadata=V1ObjectMeta(name=name, namespace=namespace),
)
self._api_instance.create_namespaced_pod_eviction(
name=name, namespace=namespace, body=body
)
Expand Down
2 changes: 1 addition & 1 deletion tests/integration/targets/k8s_drain/aliases
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
k8s_drain
k8s
k8s_info
time=78
time=121
66 changes: 66 additions & 0 deletions tests/integration/targets/k8s_drain/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -286,6 +286,72 @@
state: uncordon
name: '{{ node_to_drain }}'

- name: Create another Pod
k8s:
namespace: '{{ test_namespace }}'
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: v1
kind: Pod
metadata:
name: '{{ drain_pod_name }}-01'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- '{{ node_to_drain }}'
containers:
- name: c0
image: busybox
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
volumeMounts:
- mountPath: /emptydir
name: emptydir
volumes:
- name: emptydir
emptyDir: {}

- name: Drain node using disable_eviction set to yes
k8s_drain:
state: drain
name: '{{ node_to_drain }}'
delete_options:
force: true
disable_eviction: yes
terminate_grace_period: 0
ignore_daemonsets: yes
wait_timeout: 0
delete_emptydir_data: true
register: disable_evict

- name: assert that node has been drained
assert:
that:
- disable_evict is changed
- '"node {{ node_to_drain }} marked unschedulable." in disable_evict.result'

- name: assert that unmanaged pod were deleted
k8s_info:
namespace: '{{ test_namespace }}'
kind: Pod
name: '{{ drain_pod_name }}-01'
register: _result
failed_when: _result.resources

- name: Uncordon node
k8s_drain:
state: uncordon
name: '{{ node_to_drain }}'

always:
- name: Uncordon node
k8s_drain:
Expand Down

0 comments on commit 074f0a6

Please sign in to comment.