
Commit

Merge pull request #216 from damemi/remove-k8s-deps
Remove k8s.io/kubernetes deps and switch to go modules
k8s-ci-robot authored Feb 6, 2020
2 parents c567768 + cc92eaa commit e2a23f2
Showing 24,031 changed files with 4,536 additions and 5,698,729 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
491 changes: 0 additions & 491 deletions glide.lock

This file was deleted.

19 changes: 0 additions & 19 deletions glide.yaml

This file was deleted.

14 changes: 14 additions & 0 deletions go.mod
@@ -0,0 +1,14 @@
module sigs.k8s.io/descheduler

go 1.13

require (
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
k8s.io/api v0.17.0
k8s.io/apimachinery v0.17.3-beta.0
k8s.io/apiserver v0.17.0
k8s.io/client-go v0.17.0
k8s.io/component-base v0.17.0
k8s.io/klog v1.0.0
)
355 changes: 355 additions & 0 deletions go.sum

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion pkg/descheduler/descheduler.go
@@ -28,6 +28,7 @@ import (
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/utils"
)

func Run(rs *options.DeschedulerServer) error {
@@ -65,7 +66,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
return nil
}

nodePodCount := strategies.InitializeNodePodCount(nodes)
nodePodCount := utils.InitializeNodePodCount(nodes)
wait.Until(func() {
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
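Note: the nodePodCount bookkeeping now comes from the new sigs.k8s.io/descheduler/pkg/utils package, whose files fall outside the rendered portion of this diff. A minimal sketch of what that type and its initializer could look like, inferred from the call sites above (illustrative only, not part of the rendered changes):

package utils

import v1 "k8s.io/api/core/v1"

// NodePodEvictedCount tracks how many pods have been evicted from each node
// during a single descheduling run.
type NodePodEvictedCount map[*v1.Node]int

// InitializeNodePodCount seeds the counter with zero for every node so the
// strategies can enforce MaxNoOfPodsToEvictPerNode.
func InitializeNodePodCount(nodes []*v1.Node) NodePodEvictedCount {
	count := NodePodEvictedCount{}
	for _, node := range nodes {
		count[node] = 0
	}
	return count
}

Keying the map by node pointer keeps the per-run bookkeeping cheap; the actual implementation in pkg/utils may differ in detail.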
17 changes: 7 additions & 10 deletions pkg/descheduler/pod/pods.go
@@ -21,9 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/types"
"sigs.k8s.io/descheduler/pkg/utils"
)

const (
@@ -57,7 +55,7 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc
}

func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
if err != nil {
return []*v1.Pod{}, err
}
@@ -76,19 +74,19 @@ func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, erro
}

func IsCriticalPod(pod *v1.Pod) bool {
return types.IsCriticalPod(pod)
return utils.IsCriticalPod(pod)
}

func IsBestEffortPod(pod *v1.Pod) bool {
return qos.GetPodQOS(pod) == v1.PodQOSBestEffort
return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
}

func IsBurstablePod(pod *v1.Pod) bool {
return qos.GetPodQOS(pod) == v1.PodQOSBurstable
return utils.GetPodQOS(pod) == v1.PodQOSBurstable
}

func IsGuaranteedPod(pod *v1.Pod) bool {
return qos.GetPodQOS(pod) == v1.PodQOSGuaranteed
return utils.GetPodQOS(pod) == v1.PodQOSGuaranteed
}

func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
@@ -102,8 +100,7 @@ func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {

// IsMirrorPod checks whether the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
return found
return utils.IsMirrorPod(pod)
}

// HaveEvictAnnotation checks if the pod have evict annotation
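The helpers dropped above (types.IsCriticalPod, qos.GetPodQOS, types.ConfigMirrorAnnotationKey) are replaced by local copies under pkg/utils, which the truncated diff does not show. A rough sketch of two of them, assuming they mirror the upstream behavior (the annotation key and the simplified QoS rules below come from the upstream helpers, not from this diff):

package utils

import v1 "k8s.io/api/core/v1"

// ConfigMirrorAnnotationKey is the annotation the kubelet sets on mirror pods
// of static pods; copied locally so k8s.io/kubernetes is no longer imported.
const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror"

// IsMirrorPod reports whether the pod carries the kubelet's mirror annotation.
func IsMirrorPod(pod *v1.Pod) bool {
	_, found := pod.Annotations[ConfigMirrorAnnotationKey]
	return found
}

// GetPodQOS returns the pod's QoS class. Simplified sketch of the upstream rule:
// no requests or limits anywhere -> BestEffort; cpu and memory limits set and
// equal to requests in every container -> Guaranteed; otherwise Burstable.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
	hasResources := false
	guaranteed := true
	for _, c := range pod.Spec.Containers {
		if len(c.Resources.Requests) > 0 || len(c.Resources.Limits) > 0 {
			hasResources = true
		}
		for _, res := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
			lim, hasLim := c.Resources.Limits[res]
			if !hasLim {
				guaranteed = false
				continue
			}
			if req, hasReq := c.Resources.Requests[res]; hasReq && req.Cmp(lim) != 0 {
				guaranteed = false
			}
		}
	}
	if !hasResources {
		return v1.PodQOSBestEffort
	}
	if guaranteed {
		return v1.PodQOSGuaranteed
	}
	return v1.PodQOSBurstable
}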
4 changes: 2 additions & 2 deletions pkg/descheduler/pod/pods_test.go
@@ -17,11 +17,11 @@ limitations under the License.
package pod

import (
"sigs.k8s.io/descheduler/pkg/utils"
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/apis/scheduling"
"sigs.k8s.io/descheduler/test"
)

@@ -209,7 +209,7 @@ func TestPodTypes(t *testing.T) {
// A Critical Pod.
p5.Namespace = "kube-system"
p5.Annotations = test.GetCriticalPodAnnotation()
systemCriticalPriority := scheduling.SystemCriticalPriority
systemCriticalPriority := utils.SystemCriticalPriority
p5.Spec.Priority = &systemCriticalPriority
if !IsMirrorPod(p4) {
t.Errorf("Expected p4 to be a mirror pod.")
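The test now takes SystemCriticalPriority from pkg/utils rather than k8s.io/kubernetes/pkg/apis/scheduling. For reference, a sketch of how that constant and the IsCriticalPod check used above might be defined locally (the value matches the upstream scheduling constant; the annotation check is an assumption based on what test.GetCriticalPodAnnotation appears to exercise):

package utils

import v1 "k8s.io/api/core/v1"

// SystemCriticalPriority mirrors the upstream scheduling constant (2e9).
const SystemCriticalPriority int32 = 2 * 1000000000

// IsCriticalPod is a sketch: a pod is treated as critical when it runs in
// kube-system with the legacy critical-pod annotation, or carries a priority
// at or above SystemCriticalPriority.
func IsCriticalPod(pod *v1.Pod) bool {
	if pod.Namespace == "kube-system" {
		if _, ok := pod.Annotations["scheduler.alpha.kubernetes.io/critical-pod"]; ok {
			return true
		}
	}
	return pod.Spec.Priority != nil && *pod.Spec.Priority >= SystemCriticalPriority
}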
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/duplicates.go
@@ -27,6 +27,7 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

//type creator string
@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
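For readers following the RemoveDuplicates logic: the strategy's notion of a duplicate (same creator name, kind and namespace, per the comment above) amounts to grouping pods by an owner-derived key. A hypothetical helper illustrating that grouping key (the function name and exact key format are illustrative, not taken from this diff):

package strategies

import (
	"strings"

	v1 "k8s.io/api/core/v1"
)

// ownerKeys maps a pod to one key per owner reference; pods that share a key
// (same owner name, kind and namespace) count as duplicates for this strategy.
func ownerKeys(pod *v1.Pod) []string {
	keys := make([]string, 0, len(pod.OwnerReferences))
	for _, ref := range pod.OwnerReferences {
		keys = append(keys, strings.Join([]string{ref.Name, ref.Kind, pod.Namespace}, "/"))
	}
	return keys
}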
3 changes: 2 additions & 1 deletion pkg/descheduler/strategies/duplicates_test.go
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)

@@ -126,7 +127,7 @@ func TestFindDuplicatePods(t *testing.T) {

for _, testCase := range testCases {

npe := nodePodEvictedCount{}
npe := utils.NodePodEvictedCount{}
npe[node] = 0
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
13 changes: 6 additions & 7 deletions pkg/descheduler/strategies/lownodeutilization.go
@@ -23,13 +23,12 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
helper "k8s.io/kubernetes/pkg/api/v1/resource"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

type NodeUsageMap struct {
@@ -44,7 +43,7 @@ type NodeUsageMap struct {

type NodePodsMap map[*v1.Node][]*v1.Pod

func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
@@ -155,7 +154,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evicts them based on QoS as fallback option.
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount utils.NodePodEvictedCount) int {
podsEvicted := 0

SortNodesByUsage(targetNodes)
@@ -240,8 +239,8 @@ func evictPods(inputPods []*v1.Pod,
if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
break
}
cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
if !success {
klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
@@ -373,7 +372,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
gPods = append(gPods, pod)
}

req, _ := helper.PodRequestsAndLimits(pod)
req, _ := utils.PodRequestsAndLimits(pod)
for name, quantity := range req {
if name == v1.ResourceCPU || name == v1.ResourceMemory {
if value, ok := totalReqs[name]; !ok {
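The usage math above now leans on pkg/utils instead of k8s.io/kubernetes/pkg/api/v1/resource. A simplified sketch of what GetResourceRequest computes, assuming it follows the helper it replaces (which also folds in init containers and pod overhead); milli-CPU for cpu and bytes for memory are the units the thresholds above expect:

package utils

import v1 "k8s.io/api/core/v1"

// GetResourceRequest returns the total amount of the given resource requested
// by the pod's regular containers: milli-CPU for cpu, bytes for memory.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
	var total int64
	for _, container := range pod.Spec.Containers {
		req, ok := container.Resources.Requests[resource]
		if !ok {
			continue
		}
		if resource == v1.ResourceCPU {
			total += req.MilliValue()
		} else {
			total += req.Value()
		}
	}
	return total
}

Summing requests (rather than live usage) is what lets the strategy compare node "utilization" against the request-based thresholds in the policy.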
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/lownodeutilization_test.go
@@ -28,6 +28,7 @@ import (
core "k8s.io/client-go/testing"
"reflect"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)

@@ -116,7 +117,7 @@ func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := nodePodEvictedCount{}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
@@ -221,7 +222,7 @@ func TestLowNodeUtilizationWithPriorities(t *testing.T) {
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := nodePodEvictedCount{}
npe := utils.NodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/node_affinity.go
@@ -19,6 +19,7 @@ package strategies
import (
"k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/descheduler/pkg/utils"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
@@ -27,11 +28,11 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
evictedPodCount := 0
if !strategy.Enabled {
return evictedPodCount
15 changes: 8 additions & 7 deletions pkg/descheduler/strategies/node_affinity_test.go
@@ -25,6 +25,7 @@ import (
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)

@@ -92,7 +93,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
npe nodePodEvictedCount
npe utils.NodePodEvictedCount
maxPodsToEvict int
}{
{
@@ -108,7 +109,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
@@ -124,7 +125,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
@@ -133,7 +134,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
npe: nodePodEvictedCount{nodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
@@ -142,7 +143,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 0,
},
{
@@ -151,7 +152,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
maxPodsToEvict: 1,
},
{
@@ -160,7 +161,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
npe: nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
maxPodsToEvict: 0,
},
}
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/node_taint.go
@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"

"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -33,15 +34,15 @@ const (
)

// RemovePodsViolatingNodeTaints with elimination strategy
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
if !strategy.Enabled {
return
}
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
}

// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
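The NoSchedule check behind deletePodsViolatingNodeTaints reduces to: does the pod tolerate every NoSchedule taint on its node? A hedged sketch of that predicate using only k8s.io/api types (the function name is illustrative; the real helper lives in pkg/utils, outside the rendered diff):

package utils

import v1 "k8s.io/api/core/v1"

// toleratesNodeNoScheduleTaints reports whether the pod tolerates every
// NoSchedule taint on the node; pods for which this is false are the
// eviction candidates of the RemovePodsViolatingNodeTaints strategy.
func toleratesNodeNoScheduleTaints(pod *v1.Pod, node *v1.Node) bool {
	for i := range node.Spec.Taints {
		taint := node.Spec.Taints[i]
		if taint.Effect != v1.TaintEffectNoSchedule {
			continue
		}
		tolerated := false
		for _, t := range pod.Spec.Tolerations {
			if t.ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false
		}
	}
	return true
}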

