From af6d4f182097b6d69f7ef97cbfaa39162385e8fc Mon Sep 17 00:00:00 2001 From: Mike Dame Date: Fri, 1 May 2020 14:28:44 -0400 Subject: [PATCH] Consider pod image in duplicates strategy --- pkg/descheduler/strategies/duplicates.go | 53 +++++++++++-------- pkg/descheduler/strategies/duplicates_test.go | 30 +++++++++++ 2 files changed, 62 insertions(+), 21 deletions(-) diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go index 885724dc11..1db4231a77 100644 --- a/pkg/descheduler/strategies/duplicates.go +++ b/pkg/descheduler/strategies/duplicates.go @@ -30,7 +30,8 @@ import ( // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node. // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same -// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. +// namespace, and have at least one container with the same image. +// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. 
func RemoveDuplicatePods( client clientset.Interface, strategy api.DeschedulerStrategy, @@ -40,40 +41,50 @@ func RemoveDuplicatePods( ) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) - dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods) - for creator, pods := range dpm { - if len(pods) > 1 { - klog.V(1).Infof("%#v", creator) - // i = 0 does not evict the first pod - for i := 1; i < len(pods); i++ { - if _, err := podEvictor.EvictPod(pods[i], node); err != nil { - break - } - } + duplicatePods := listDuplicatePodsOnANode(client, node, evictLocalStoragePods) + for _, pod := range duplicatePods { + if _, err := podEvictor.EvictPod(pod, node); err != nil { + break } } } } -//type creator string -type duplicatePodsMap map[string][]*v1.Pod - // listDuplicatePodsOnANode lists duplicate pods on a given node. -func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap { +// It checks for pods which have the same owner and have at least 1 container with the same image spec +func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) []*v1.Pod { pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) if err != nil { return nil } - dpm := duplicatePodsMap{} - // Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error. + duplicatePods := make([]*v1.Pod, 0, len(pods)) + dpm := map[string][]*v1.Pod{} for _, pod := range pods { ownerRefList := podutil.OwnerRef(pod) + podKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers)) for _, ownerRef := range ownerRefList { - // Namespace/Kind/Name should be unique for the cluster. 
- s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name}, "/") - dpm[s] = append(dpm[s], pod) + for _, container := range pod.Spec.Containers { + // Namespace/Kind/Name should be unique for the cluster. + // We also consider the image, as 2 pods could have the same owner but serve different purposes + // So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod. + s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/") + podKeys = append(podKeys, s) + } + } + + // Check if any of the keys have already been found. If we find one, this is a duplicate pod and we can + // stop appending it to the list (to avoid adding the same pod twice for multiple matching container images) + // but we still keep adding the rest of its keys because it may have other container images that other pods will + // subsequently match on. + duplicate := false + for _, key := range podKeys { + dpm[key] = append(dpm[key], pod) + if len(dpm[key]) > 1 && !duplicate { + duplicate = true + duplicatePods = append(duplicatePods, pod) + } } } - return dpm + return duplicatePods } diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go index d2ccd92ee7..f8d6b79b1c 100644 --- a/pkg/descheduler/strategies/duplicates_test.go +++ b/pkg/descheduler/strategies/duplicates_test.go @@ -50,6 +50,12 @@ func TestFindDuplicatePods(t *testing.T) { p9.Namespace = "test" p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil) p10.Namespace = "test" + p11 := test.BuildTestPod("p11", 100, 0, node.Name, nil) + p11.Namespace = "different-images" + p12 := test.BuildTestPod("p12", 100, 0, node.Name, nil) + p12.Namespace = "different-images" + p13 := test.BuildTestPod("p13", 100, 0, node.Name, nil) + p13.Namespace = "match-self" // ### Evictable Pods ### @@ -90,6 +96,18 @@ func TestFindDuplicatePods(t *testing.T) { priority := utils.SystemCriticalPriority p7.Spec.Priority = &priority 
+ // Same owners, but different images + p11.Spec.Containers[0].Image = "foo" + p11.ObjectMeta.OwnerReferences = ownerRef1 + p12.Spec.Containers[0].Image = "bar" + p12.ObjectMeta.OwnerReferences = ownerRef2 + + // Multiple containers + p13.Spec.Containers = append(p13.Spec.Containers, v1.Container{ + Name: "foo", + Image: "foo", + }) + testCases := []struct { description string maxPodsToEvict int @@ -126,6 +144,18 @@ func TestFindDuplicatePods(t *testing.T) { pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10}, expectedEvictedPodCount: 4, }, + { + description: "Pods with the same owner but different images should not be evicted", + maxPodsToEvict: 5, + pods: []v1.Pod{*p11, *p12}, + expectedEvictedPodCount: 0, + }, + { + description: "Pods with multiple containers should not match themselves", + maxPodsToEvict: 5, + pods: []v1.Pod{*p13}, + expectedEvictedPodCount: 0, + }, } for _, testCase := range testCases {