Add pod label selector to cli and strategies #202

Closed · wants to merge 1 commit
3 changes: 3 additions & 0 deletions cmd/descheduler/app/options/options.go
@@ -54,6 +54,9 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// pod-selector query causes descheduler to only evict pods that match the pod labels in the query
fs.StringVar(&rs.PodSelector, "pod-selector", rs.PodSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")

// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
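Aside (not part of this diff): since the selector string is parsed later with labels.Parse, the flag value could also be validated up front at option-handling time. A minimal sketch, assuming a hypothetical validatePodSelector helper:

```go
package options

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// validatePodSelector is a hypothetical helper (not in this PR): it rejects a
// malformed --pod-selector value at startup using the same parser the
// strategies rely on, instead of letting each strategy fail later.
func validatePodSelector(podSelector string) error {
	if _, err := labels.Parse(podSelector); err != nil {
		return fmt.Errorf("invalid --pod-selector %q: %v", podSelector, err)
	}
	return nil
}
```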
3 changes: 3 additions & 0 deletions pkg/apis/componentconfig/types.go
@@ -43,6 +43,9 @@ type DeschedulerConfiguration struct {
// Node selectors
NodeSelector string

// Pod selectors
PodSelector string

// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int

3 changes: 3 additions & 0 deletions pkg/apis/componentconfig/v1alpha1/types.go
@@ -43,6 +43,9 @@ type DeschedulerConfiguration struct {
// Node selectors
NodeSelector string `json:"nodeSelector,omitempty"`

// Pod selectors
PodSelector string `json:"podSelector,omitempty"`

// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`

13 changes: 9 additions & 4 deletions pkg/descheduler/pod/pods.go
@@ -20,6 +20,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
@@ -40,8 +41,12 @@ func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
}

// ListEvictablePodsOnNode returns the list of evictable pods on node.
func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
pods, err := ListPodsOnANode(client, node)
func ListEvictablePodsOnNode(client clientset.Interface, labelSelector string, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
podSelector, err := labels.Parse(labelSelector)
if err != nil {
return []*v1.Pod{}, err
}
pods, err := ListPodsOnANode(client, podSelector, node)
if err != nil {
return []*v1.Pod{}, err
}
@@ -56,14 +61,14 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc
return evictablePods, nil
}

func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
func ListPodsOnANode(client clientset.Interface, labelSelector labels.Selector, node *v1.Node) ([]*v1.Pod, error) {
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
if err != nil {
return []*v1.Pod{}, err
}

podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
metav1.ListOptions{FieldSelector: fieldSelector.String()})
metav1.ListOptions{FieldSelector: fieldSelector.String(), LabelSelector: labelSelector.String()})
if err != nil {
return []*v1.Pod{}, err
}
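One point worth noting about the new labelSelector parameter: labels.Parse("") succeeds and returns a selector that matches everything, so callers that pass an empty string (as the updated tests below do) keep the previous list-all behavior. A small self-contained sketch, using only the apimachinery labels package:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// An empty selector string parses without error...
	sel, err := labels.Parse("")
	if err != nil {
		panic(err)
	}

	// ...and matches any label set, including pods with no labels at all.
	fmt.Println(sel.Matches(labels.Set{}))               // true
	fmt.Println(sel.Matches(labels.Set{"app": "nginx"})) // true

	// Its string form is empty too, so the ListOptions built in
	// ListPodsOnANode degenerates to a plain field-selector list.
	opts := metav1.ListOptions{LabelSelector: sel.String()}
	fmt.Printf("%q\n", opts.LabelSelector) // ""
}
```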
13 changes: 6 additions & 7 deletions pkg/descheduler/strategies/duplicates.go
@@ -19,10 +19,9 @@ package strategies
import (
"strings"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -39,15 +38,15 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.Descheduler
if !strategy.Enabled {
return
}
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.PodSelector, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, labelSelector string, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
dpm := ListDuplicatePodsOnANode(client, labelSelector, node, evictLocalStoragePods)
for creator, pods := range dpm {
if len(pods) > 1 {
klog.V(1).Infof("%#v", creator)
@@ -72,8 +71,8 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
}

// ListDuplicatePodsOnANode lists duplicate pods on a given node.
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
func ListDuplicatePodsOnANode(client clientset.Interface, labelSelector string, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
pods, err := podutil.ListEvictablePodsOnNode(client, labelSelector, node, evictLocalStoragePods)
if err != nil {
return nil
}
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/duplicates_test.go
@@ -19,7 +19,7 @@ package strategies
import (
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -128,14 +128,15 @@ func TestFindDuplicatePods(t *testing.T) {

npe := nodePodEvictedCount{}
npe[node] = 0
labelSelector := ""
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, testCase.maxPodsToEvict, false)
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, labelSelector, testCase.maxPodsToEvict, false)
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
}
14 changes: 10 additions & 4 deletions pkg/descheduler/strategies/lownodeutilization.go
@@ -19,8 +19,9 @@ package strategies
import (
"sort"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
helper "k8s.io/kubernetes/pkg/api/v1/resource"
@@ -60,7 +61,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
return
}

npm := createNodePodsMap(ds.Client, nodes)
npm := createNodePodsMap(ds.Client, ds.PodSelector, nodes)
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)

klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
@@ -311,10 +312,15 @@ func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
}

// createNodePodsMap returns nodepodsmap with evictable pods on node.
func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
func createNodePodsMap(client clientset.Interface, podSelector string, nodes []*v1.Node) NodePodsMap {
npm := NodePodsMap{}
selector, err := labels.Parse(podSelector)
if err != nil {
return npm
}

for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(client, node)
pods, err := podutil.ListPodsOnANode(client, selector, node)
Contributor:

Pod selectors might not work right for the lownodeutilization strategy this way, because in this strategy a node's load is computed by considering all pods. So the load on each node should still be computed over all pods; perhaps the pod selector should only be applied during eviction.

Author:

Would it be better if I moved the implementation into podutil.IsEvictable rather than podutil.ListPodsOnANode? I considered this, but it means a label check on each pod, which slows the check down as pod counts grow. Would it work to make lownodeutilization a special case? It already works quite a bit differently from the other strategies.

if err != nil {
klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
} else {
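To make the trade-off in the thread above concrete, here is an illustrative sketch (not what this PR implements) of the alternative: keep listing every pod so the utilization math is unchanged, and apply the selector only when a pod is considered for eviction. isEvictableWithSelector is a hypothetical wrapper, and the IsEvictable body is stubbed for the example:

```go
package pod

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// IsEvictable stands in for the existing check in pkg/descheduler/pod; the
// real version inspects owner references, mirror-pod and critical-pod
// annotations, and local storage usage.
func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
	return true // stubbed for the sketch
}

// isEvictableWithSelector applies the pod selector per pod at eviction time.
// This keeps node load computed over all pods, at the cost of one
// selector.Matches call per candidate pod.
func isEvictableWithSelector(pod *v1.Pod, selector labels.Selector, evictLocalStoragePods bool) bool {
	if selector != nil && !selector.Matches(labels.Set(pod.Labels)) {
		return false
	}
	return IsEvictable(pod, evictLocalStoragePods)
}
```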
11 changes: 7 additions & 4 deletions pkg/descheduler/strategies/lownodeutilization_test.go
@@ -21,12 +21,13 @@ import (
"strings"
"testing"

"k8s.io/api/core/v1"
"reflect"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"reflect"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/test"
)
@@ -83,6 +84,7 @@ func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
p8.Annotations = test.GetCriticalPodAnnotation()
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
labelSelector := ""
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
@@ -111,7 +113,7 @@ func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
npm := createNodePodsMap(fakeClient, labelSelector, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
@@ -188,6 +190,7 @@ func TestLowNodeUtilizationWithPriorities(t *testing.T) {
p8.Annotations = test.GetCriticalPodAnnotation()
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
labelSelector := ""
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
@@ -216,7 +219,7 @@ func TestLowNodeUtilizationWithPriorities(t *testing.T) {
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
npm := createNodePodsMap(fakeClient, labelSelector, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
4 changes: 2 additions & 2 deletions pkg/descheduler/strategies/node_affinity.go
@@ -17,7 +17,7 @@ limitations under the License.
package strategies

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
@@ -45,7 +45,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)

pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, ds.PodSelector, node, evictLocalStoragePods)
if err != nil {
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
}
8 changes: 4 additions & 4 deletions pkg/descheduler/strategies/node_taint.go
@@ -22,7 +22,7 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
@@ -37,15 +37,15 @@ func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.D
if !strategy.Enabled {
return
}
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.PodSelector, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, podSelector string, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
pods, err := podutil.ListEvictablePodsOnNode(client, podSelector, node, evictLocalStoragePods)
if err != nil {
//no pods evicted as error encountered retrieving evictable Pods
return 0
9 changes: 5 additions & 4 deletions pkg/descheduler/strategies/node_taint_test.go
@@ -2,13 +2,14 @@ package strategies

import (
"fmt"
"k8s.io/api/core/v1"
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test"
"testing"
)

func createNoScheduleTaint(key, value string, index int) v1.Taint {
@@ -158,14 +159,14 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
}

for _, tc := range tests {

labelSelector := ""
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})

actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, tc.maxPodsToEvict, tc.evictLocalStoragePods)
actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, labelSelector, tc.maxPodsToEvict, tc.evictLocalStoragePods)
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
8 changes: 4 additions & 4 deletions pkg/descheduler/strategies/pod_antiaffinity.go
@@ -22,7 +22,7 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
@@ -34,15 +34,15 @@ func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, stra
if !strategy.Enabled {
return
}
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.PodSelector, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, podSelector string, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
pods, err := podutil.ListEvictablePodsOnNode(client, podSelector, node, evictLocalStoragePods)
if err != nil {
return 0
}
8 changes: 4 additions & 4 deletions pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -19,7 +19,7 @@ package strategies
import (
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -43,7 +43,7 @@ func TestPodAntiAffinity(t *testing.T) {
setPodAntiAffinity(p1)
setPodAntiAffinity(p3)
setPodAntiAffinity(p4)

labelSelector := ""
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
@@ -55,13 +55,13 @@ func TestPodAntiAffinity(t *testing.T) {
npe := nodePodEvictedCount{}
npe[node] = 0
expectedEvictedPodCount := 3
podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, labelSelector, 0, false)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
}
npe[node] = 0
expectedEvictedPodCount = 1
podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, labelSelector, 1, false)
if podsEvicted != expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
}
5 changes: 3 additions & 2 deletions test/e2e/e2e_test.go
@@ -160,13 +160,14 @@ func TestE2E(t *testing.T) {
func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
var leastLoadedNode v1.Node
podsBefore := math.MaxInt16
labelSelector := ""
for i := range nodeList.Items {
// Skip the Master Node
if _, exist := nodeList.Items[i].Labels["node-role.kubernetes.io/master"]; exist {
continue
}
// List all the pods on the current Node
podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, labelSelector, &nodeList.Items[i], true)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
@@ -178,7 +179,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeLis
}
t.Log("Eviction of pods starting")
startEndToEndForLowNodeUtilization(clientSet)
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, labelSelector, &leastLoadedNode, true)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}