diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go
index a3e329df04b0..8d709dcb44e1 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go
@@ -21,6 +21,8 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/kubernetes/pkg/features"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -56,20 +58,31 @@ func (r *ResourceAllocationPriority) PriorityMap(
 	requested.Memory += nodeInfo.NonZeroRequest().Memory
 	var score int64
 	// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
-	if len(pod.Spec.Volumes) >= 0 && nodeInfo.TransientInfo != nil {
+	if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
 		score = r.scorer(&requested, &allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
 	} else {
 		score = r.scorer(&requested, &allocatable, false, 0, 0)
 	}
 
 	if glog.V(10) {
-		glog.Infof(
-			"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
-			pod.Name, node.Name, r.Name,
-			allocatable.MilliCPU, allocatable.Memory,
-			requested.MilliCPU+allocatable.MilliCPU, requested.Memory+allocatable.Memory,
-			score,
-		)
+		if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
+			glog.Infof(
+				"%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
+				pod.Name, node.Name, r.Name,
+				allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
+				requested.MilliCPU, requested.Memory,
+				nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes,
+				score,
+			)
+		} else {
+			glog.Infof(
+				"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
+				pod.Name, node.Name, r.Name,
+				allocatable.MilliCPU, allocatable.Memory,
+				requested.MilliCPU, requested.Memory,
+				score,
+			)
+		}
 	}
 
 	return schedulerapi.HostPriority{
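
Outside the patch itself, here is a minimal, self-contained Go sketch of the pattern the change introduces: volume counts are passed to the scorer only when the feature gate is enabled and transient node info is present; otherwise the call falls back to zeros. All names below (scoreNode, transientInfo, the toy scorer) are illustrative stand-ins, not the actual scheduler API.

package main

import "fmt"

// transientInfo stands in for the scheduler's per-node transient volume counts.
type transientInfo struct {
	requestedVolumes        int
	allocatableVolumesCount int
}

// resources stands in for the requested/allocatable resource totals.
type resources struct {
	milliCPU int64
	memory   int64
}

// scoreNode mirrors the branch the patch adds: volume data reaches the scorer
// only when the gate is on and transient info exists.
func scoreNode(requested, allocatable *resources, gateEnabled bool, ti *transientInfo,
	scorer func(req, alloc *resources, includeVolumes bool, reqVols, allocVols int) int64) int64 {
	if gateEnabled && ti != nil {
		return scorer(requested, allocatable, true, ti.requestedVolumes, ti.allocatableVolumesCount)
	}
	return scorer(requested, allocatable, false, 0, 0)
}

func main() {
	// Toy scorer: fraction of free CPU, nudged by free volume slots when enabled.
	scorer := func(req, alloc *resources, includeVolumes bool, reqVols, allocVols int) int64 {
		score := (alloc.milliCPU - req.milliCPU) * 10 / alloc.milliCPU
		if includeVolumes && allocVols > 0 {
			score += int64((allocVols - reqVols) * 10 / allocVols)
		}
		return score
	}
	req := &resources{milliCPU: 500, memory: 1 << 30}
	alloc := &resources{milliCPU: 4000, memory: 8 << 30}
	fmt.Println("gate off:", scoreNode(req, alloc, false, nil, scorer))
	fmt.Println("gate on: ", scoreNode(req, alloc, true, &transientInfo{2, 10}, scorer))
}

Note that in the patch the leading len(pod.Spec.Volumes) >= 0 condition is always true, so the feature-gate check and the TransientInfo nil check are what actually decide which branch runs.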