Make generated pods only request the maximum necessary resources
Before this change, if memory or CPU requests were set in a Task's
Steps (which are Containers), the generated Pod would require the sum
of all of the Steps' requests in order to be scheduled on a Node.
However, because Tekton overwrites Container entrypoints in Tasks to
effectively make the Containers execute one at a time, we want Pods
generated by the TaskRun to only request the maximum resources that
will be necessary for any single Container, rather than the sum of
all resource requests.

To make this happen, when generating a Pod for a Task, we find the
Steps with the largest CPU and memory requests among all Steps, and
set the corresponding CPU/memory requests of every other Step to 0.
If no Step has an explicit CPU/memory request, all requests are set
to 0. If we unset resource requests instead of setting them to 0
explicitly, then the limits would be used for the requests, which
would defeat the purpose of unsetting the requested values (and
could end up making the Pod request more memory than it did in the
first place).
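
To illustrate the scheduling arithmetic, here is a minimal, hypothetical sketch (not part of this commit; the step count and quantities are invented) comparing the Pod's effective request before and after this change:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical Task: step A requests 1 CPU / 2Gi, step B requests 2 CPU / 1Gi.
	cpu := []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}
	mem := []resource.Quantity{resource.MustParse("2Gi"), resource.MustParse("1Gi")}

	sumOf := func(qs []resource.Quantity) resource.Quantity {
		total := qs[0].DeepCopy()
		for _, q := range qs[1:] {
			total.Add(q)
		}
		return total
	}
	maxOf := func(qs []resource.Quantity) resource.Quantity {
		m := qs[0]
		for _, q := range qs[1:] {
			if q.Cmp(m) > 0 {
				m = q
			}
		}
		return m
	}

	sumCPU, sumMem := sumOf(cpu), sumOf(mem)
	maxCPU, maxMem := maxOf(cpu), maxOf(mem)
	// Before: the Pod had to fit the sum of all Step requests.
	fmt.Printf("before: cpu=%s memory=%s\n", sumCPU.String(), sumMem.String()) // cpu=3 memory=3Gi
	// After: the Pod only has to fit the single largest request per resource.
	fmt.Printf("after:  cpu=%s memory=%s\n", maxCPU.String(), maxMem.String()) // cpu=2 memory=2Gi
}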

Fixes #598
dwnusbaum committed Apr 4, 2019
1 parent d719494 commit e9649ca
Showing 6 changed files with 320 additions and 7 deletions.
6 changes: 6 additions & 0 deletions docs/tasks.md
@@ -129,6 +129,12 @@ or container images that you define:
the configuration file.
- Each container image runs until completion or until the first failure is
detected.
- The CPU/memory resource request fields will be set to zero if the container
image does not have the largest CPU/memory resource request out of all
container images in the Task. This ensures that the Pod that executes the Task
only requests the resources necessary to execute any single container image
in the Task, rather than the sum of all of the container images' resource
requests (see the sketch below).
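
A hypothetical sketch of the resulting behavior (not part of the diff; the step names and quantities are invented). Note that the largest CPU and memory requests may come from different container images, so requests are zeroed independently per resource:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Requests as written in the hypothetical Task: "build" has the largest
	// CPU request, "test" has the largest memory request.
	build := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("2"),
		corev1.ResourceMemory: resource.MustParse("1Gi"),
	}
	test := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("1"),
		corev1.ResourceMemory: resource.MustParse("2Gi"),
	}

	// Requests as they appear in the generated Pod: every non-max request
	// is set to 0 explicitly, independently per resource.
	build[corev1.ResourceMemory] = resource.MustParse("0")
	test[corev1.ResourceCPU] = resource.MustParse("0")

	for _, s := range []struct {
		name string
		reqs corev1.ResourceList
	}{{"build", build}, {"test", test}} {
		fmt.Printf("%s: cpu=%s memory=%s\n", s.name, s.reqs.Cpu().String(), s.reqs.Memory().String())
	}
	// build: cpu=2 memory=0
	// test: cpu=0 memory=2Gi
}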

### Inputs

40 changes: 40 additions & 0 deletions pkg/reconciler/v1alpha1/taskrun/resources/pod.go
Expand Up @@ -28,6 +28,7 @@ import (
"path/filepath"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
@@ -73,6 +74,8 @@ var (
		VolumeSource: emptyVolumeSource,
	}}

	zeroQty = resource.MustParse("0")

	// Random byte reader used for pod name generation.
	// var for testing.
	randReader = rand.Reader
@@ -174,6 +177,9 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k
	initContainers := []corev1.Container{*cred}
	podContainers := []corev1.Container{}

	maxCpuIdx := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceCPU)
	maxMemIdx := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceMemory)

	for i, step := range taskSpec.Steps {
		step.Env = append(implicitEnvVars, step.Env...)
		// TODO(mattmoor): Check that volumeMounts match volumes.
@@ -202,6 +208,7 @@ func MakePod(taskRun *v1alpha1.TaskRun, taskSpec v1alpha1.TaskSpec, kubeclient k
		if step.Name == names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", containerPrefix, entrypoint.InitContainerName)) {
			initContainers = append(initContainers, step)
		} else {
			zeroNonMaxResourceRequests(&step, i == maxCpuIdx, i == maxMemIdx)
			podContainers = append(podContainers, step)
		}
	}
@@ -261,3 +268,36 @@ func makeLabels(s *v1alpha1.TaskRun) map[string]string {
	labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = s.Name
	return labels
}

// zeroNonMaxResourceRequests zeroes out the container's cpu or memory resource
// requests if the container does not have the largest request out of all
// containers in the pod. This is done because Tekton overwrites each
// container's entrypoint to make containers effectively execute one at a time,
// so we want pods to only request the maximum resources needed at any single point in time.
// If no container has an explicit resource request, all requests are set to 0.
func zeroNonMaxResourceRequests(container *corev1.Container, isMaxCpu, isMaxMem bool) {
	if container.Resources.Requests == nil {
		container.Resources.Requests = corev1.ResourceList{}
	}
	if !isMaxCpu {
		container.Resources.Requests[corev1.ResourceCPU] = zeroQty
	}
	if !isMaxMem {
		container.Resources.Requests[corev1.ResourceMemory] = zeroQty
	}
}

// findMaxResourceRequest returns the index of the container with the maximum
// request for the given resource from among the given set of containers.
func findMaxResourceRequest(containers []corev1.Container, resourceName corev1.ResourceName) int {
	maxIdx := -1
	maxReq := zeroQty
	for i, c := range containers {
		req, exists := c.Resources.Requests[resourceName]
		if exists && req.Cmp(maxReq) > 0 {
			maxIdx = i
			maxReq = req
		}
	}
	return maxIdx
}
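
A minimal sketch of how these two helpers compose, mirroring the call sites in MakePod above (hypothetical; the helper name zeroStepRequests is invented, and the code would have to live in this package since both helpers are unexported):

// zeroStepRequests mirrors the zeroing logic in MakePod: only the step
// holding the largest request for each resource keeps it; every other
// step's request for that resource is set to 0 explicitly.
func zeroStepRequests(steps []corev1.Container) {
	maxCpuIdx := findMaxResourceRequest(steps, corev1.ResourceCPU)
	maxMemIdx := findMaxResourceRequest(steps, corev1.ResourceMemory)
	for i := range steps {
		// If no step requested a resource at all, findMaxResourceRequest
		// returned -1, so every step's request for it becomes 0.
		zeroNonMaxResourceRequests(&steps[i], i == maxCpuIdx, i == maxMemIdx)
	}
}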
33 changes: 29 additions & 4 deletions pkg/reconciler/v1alpha1/taskrun/resources/pod_test.go
Expand Up @@ -22,7 +22,6 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,8 +32,10 @@ import (
)

var (
	ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{})
	nopContainer = corev1.Container{
	resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
		return x.Cmp(y) == 0
	})
	nopContainer = corev1.Container{
		Name:    "nop",
		Image:   *nopImage,
		Command: []string{"/ko-app/nop"},
@@ -104,6 +105,12 @@ func TestMakePod(t *testing.T) {
			Env:          implicitEnvVars,
			VolumeMounts: implicitVolumeMounts,
			WorkingDir:   workspaceDir,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("0"),
					corev1.ResourceMemory: resource.MustParse("0"),
				},
			},
		},
		nopContainer,
	},
@@ -143,6 +150,12 @@ func TestMakePod(t *testing.T) {
			Env:          implicitEnvVars,
			VolumeMounts: implicitVolumeMounts,
			WorkingDir:   workspaceDir,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("0"),
					corev1.ResourceMemory: resource.MustParse("0"),
				},
			},
		},
		nopContainer,
	},
@@ -176,6 +189,12 @@ func TestMakePod(t *testing.T) {
			Env:          implicitEnvVars,
			VolumeMounts: implicitVolumeMounts,
			WorkingDir:   workspaceDir,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("0"),
					corev1.ResourceMemory: resource.MustParse("0"),
				},
			},
		},
		nopContainer,
	},
@@ -209,6 +228,12 @@ func TestMakePod(t *testing.T) {
			Env:          implicitEnvVars,
			VolumeMounts: implicitVolumeMounts,
			WorkingDir:   workspaceDir,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("0"),
					corev1.ResourceMemory: resource.MustParse("0"),
				},
			},
		},
		nopContainer,
	},
@@ -257,7 +282,7 @@ func TestMakePod(t *testing.T) {
t.Errorf("Pod name got %q, want %q", got.Name, wantName)
}

if d := cmp.Diff(&got.Spec, c.want, ignorePrivateResourceFields); d != "" {
if d := cmp.Diff(&got.Spec, c.want, resourceQuantityCmp); d != "" {
t.Errorf("Diff spec:\n%s", d)
}
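
The switch from cmpopts.IgnoreUnexported to a cmp.Comparer means quantities are now compared by semantic value via Quantity.Cmp rather than by their internal representation. A minimal sketch (hypothetical, not part of the diff):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	quantityCmp := cmp.Comparer(func(x, y resource.Quantity) bool {
		return x.Cmp(y) == 0
	})
	// Semantically equal quantities with different representations match...
	fmt.Println(cmp.Equal(resource.MustParse("1"), resource.MustParse("1000m"), quantityCmp)) // true
	// ...while genuinely different values are still caught.
	fmt.Println(cmp.Equal(resource.MustParse("0"), resource.MustParse("1"), quantityCmp)) // false
}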

[Diffs for the remaining changed files did not load.]
