Consider pod image in duplicates strategy
damemi committed May 12, 2020
1 parent d7e9305 commit 7c65e7c
Showing 2 changed files with 63 additions and 21 deletions.
54 changes: 33 additions & 21 deletions pkg/descheduler/strategies/duplicates.go
@@ -31,7 +31,8 @@ import (

// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of another if both of them are from the same creator and kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
// namespace, and have at least one container with the same image.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemoveDuplicatePods(
ctx context.Context,
client clientset.Interface,
@@ -42,40 +43,51 @@ func RemoveDuplicatePods(
) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
dpm := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods)
for creator, pods := range dpm {
if len(pods) > 1 {
klog.V(1).Infof("%#v", creator)
// i = 0 does not evict the first pod
for i := 1; i < len(pods); i++ {
if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
break
}
}
duplicatePods := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods)
for _, pod := range duplicatePods {
if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
break
}
}
}
}

//type creator string
type duplicatePodsMap map[string][]*v1.Pod

// listDuplicatePodsOnANode lists duplicate pods on a given node.
func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {

// It checks for pods which have the same owner and have at least 1 container with the same image spec
func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) []*v1.Pod {
pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
if err != nil {
return nil
}

dpm := duplicatePodsMap{}
// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
duplicatePods := make([]*v1.Pod, 0, len(pods))
dpm := map[string][]*v1.Pod{}
for _, pod := range pods {
ownerRefList := podutil.OwnerRef(pod)
podKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
for _, ownerRef := range ownerRefList {
// Namespace/Kind/Name should be unique for the cluster.
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name}, "/")
dpm[s] = append(dpm[s], pod)
for _, container := range pod.Spec.Containers {
// Namespace/Kind/Name should be unique for the cluster.
// We also consider the image, as 2 pods could have the same owner but serve different purposes
// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/")
podKeys = append(podKeys, s)
}
}

// Check if any of the keys have already been found. If we find one, this is a duplicate pod and we can
// stop appending it to the list (to avoid adding the same pod twice for multiple matching container images)
// but we still keep adding the rest of its keys because it may have other container images that other pods will
// subsequently match on.
duplicate := false
for _, key := range podKeys {
dpm[key] = append(dpm[key], pod)
if len(dpm[key]) > 1 && !duplicate {
duplicate = true
duplicatePods = append(duplicatePods, pod)
}
}
}
return dpm
return duplicatePods
}
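
For reference, the new keying can be exercised on its own. The following is a minimal standalone sketch, not part of this commit, that builds the same Namespace/Kind/Name/Image keys; the pod, owner, and image names are invented for illustration.

// Standalone sketch of the duplicate-key logic described above.
// Pod, owner, and image names are hypothetical.
package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// duplicateKeys mirrors the key construction in listDuplicatePodsOnANode:
// one Namespace/Kind/Name/Image key per (owner reference, container) pair.
func duplicateKeys(pod *v1.Pod) []string {
	keys := make([]string, 0, len(pod.OwnerReferences)*len(pod.Spec.Containers))
	for _, ownerRef := range pod.OwnerReferences {
		for _, container := range pod.Spec.Containers {
			keys = append(keys, strings.Join([]string{pod.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/"))
		}
	}
	return keys
}

func main() {
	owner := metav1.OwnerReference{Kind: "ReplicaSet", Name: "web-rs"}
	makePod := func(name, image string) *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:            name,
				Namespace:       "demo",
				OwnerReferences: []metav1.OwnerReference{owner},
			},
			Spec: v1.PodSpec{Containers: []v1.Container{{Name: "app", Image: image}}},
		}
	}

	seen := map[string]int{}
	for _, pod := range []*v1.Pod{makePod("web-1", "nginx"), makePod("web-2", "nginx"), makePod("web-3", "redis")} {
		duplicate := false
		for _, key := range duplicateKeys(pod) {
			seen[key]++
			if seen[key] > 1 {
				duplicate = true
			}
		}
		// Expected: web-1 duplicate=false, web-2 duplicate=true (same owner and image), web-3 duplicate=false (different image).
		fmt.Printf("%s duplicate=%v\n", pod.Name, duplicate)
	}
}

As in the strategy, the first pod to produce a given key is not treated as a duplicate; only later pods whose owner and image collide with an already-seen key are.
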
30 changes: 30 additions & 0 deletions pkg/descheduler/strategies/duplicates_test.go
@@ -52,6 +52,12 @@ func TestFindDuplicatePods(t *testing.T) {
p9.Namespace = "test"
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
p10.Namespace = "test"
p11 := test.BuildTestPod("p11", 100, 0, node.Name, nil)
p11.Namespace = "different-images"
p12 := test.BuildTestPod("p12", 100, 0, node.Name, nil)
p12.Namespace = "different-images"
p13 := test.BuildTestPod("p13", 100, 0, node.Name, nil)
p13.Namespace = "match-self"

// ### Evictable Pods ###

@@ -92,6 +98,18 @@ func TestFindDuplicatePods(t *testing.T) {
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority

// Same owners, but different images
p11.Spec.Containers[0].Image = "foo"
p11.ObjectMeta.OwnerReferences = ownerRef1
p12.Spec.Containers[0].Image = "bar"
p12.ObjectMeta.OwnerReferences = ownerRef2

// Multiple containers
p13.Spec.Containers = append(p13.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})

testCases := []struct {
description string
maxPodsToEvict int
@@ -128,6 +146,18 @@ func TestFindDuplicatePods(t *testing.T) {
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
expectedEvictedPodCount: 4,
},
{
description: "Pods with the same owner but different images should not be evicted",
maxPodsToEvict: 5,
pods: []v1.Pod{*p11, *p12},
expectedEvictedPodCount: 0,
},
{
description: "Pods with multiple containers should not match themselves",
maxPodsToEvict: 5,
pods: []v1.Pod{*p13},
expectedEvictedPodCount: 0,
},
}

for _, testCase := range testCases {

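Assuming a local checkout of the repository, the updated cases can be run with the standard Go toolchain, for example:

go test ./pkg/descheduler/strategies/ -run TestFindDuplicatePods -v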