Skip to content

Commit

Permalink
grpc: implement update command
Browse files Browse the repository at this point in the history
The update command is used to update the resources of running containers.
Implement the update request in the agent gRPC API.

fixes kata-containers#228

Signed-off-by: Julio Montes <[email protected]>
  • Loading branch information
Julio Montes committed Apr 30, 2018
1 parent 74e720e commit 888dae0
Show file tree
Hide file tree
Showing 7 changed files with 466 additions and 116 deletions.
50 changes: 50 additions & 0 deletions grpc.go
Original file line number Diff line number Diff line change
Expand Up @@ -718,6 +718,56 @@ func (a *agentGRPC) ListProcesses(ctx context.Context, req *pb.ListProcessesRequ
return resp, nil
}

// UpdateContainer updates the cgroup resources (block IO weight, CPU,
// memory and pids limits) of a running container inside the sandbox.
// Resource classes left nil in the request are not touched, so the
// container keeps its current values for them.
func (a *agentGRPC) UpdateContainer(ctx context.Context, req *pb.UpdateContainerRequest) (*gpb.Empty, error) {
	if req.Resources == nil {
		// Error strings are lowercase per Go convention.
		return emptyResp, fmt.Errorf("resources in the request are nil")
	}

	c, err := a.sandbox.getContainer(req.ContainerId)
	if err != nil {
		return emptyResp, err
	}

	config := c.container.Config()

	// Make sure the cgroup resources structure exists before writing to it.
	if config.Cgroups == nil {
		config.Cgroups = &configs.Cgroup{}
	}
	if config.Cgroups.Resources == nil {
		config.Cgroups.Resources = &configs.Resources{}
	}

	res := config.Cgroups.Resources

	if blkio := req.Resources.BlockIO; blkio != nil {
		res.BlkioWeight = uint16(blkio.Weight)
	}

	if cpu := req.Resources.CPU; cpu != nil {
		res.CpuPeriod = cpu.Period
		res.CpuQuota = cpu.Quota
		res.CpuShares = cpu.Shares
		res.CpuRtPeriod = cpu.RealtimePeriod
		res.CpuRtRuntime = cpu.RealtimeRuntime
		res.CpusetCpus = cpu.Cpus
		res.CpusetMems = cpu.Mems
	}

	if mem := req.Resources.Memory; mem != nil {
		res.KernelMemory = mem.Kernel
		res.KernelMemoryTCP = mem.KernelTCP
		res.Memory = mem.Limit
		res.MemoryReservation = mem.Reservation
		res.MemorySwap = mem.Swap
	}

	if pids := req.Resources.Pids; pids != nil {
		res.PidsLimit = pids.Limit
	}

	// Apply the updated configuration to the running container.
	return emptyResp, c.container.Set(config)
}

func (a *agentGRPC) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*gpb.Empty, error) {
ctr, err := a.sandbox.getContainer(req.ContainerId)
if err != nil {
Expand Down
37 changes: 37 additions & 0 deletions grpc_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,3 +226,40 @@ func TestListProcesses(t *testing.T) {
assert.NotNil(r)
assert.NotEmpty(r.ProcessList)
}

// TestUpdateContainer checks that UpdateContainer fails for an unknown
// container ID and succeeds once a mock container is registered.
func TestUpdateContainer(t *testing.T) {
	assert := assert.New(t)

	const cid = "1"
	req := &pb.UpdateContainerRequest{
		ContainerId: cid,
		Resources: &pb.LinuxResources{
			BlockIO: &pb.LinuxBlockIO{},
			Memory:  &pb.LinuxMemory{},
			CPU:     &pb.LinuxCPU{},
			Pids:    &pb.LinuxPids{},
			Network: &pb.LinuxNetwork{},
		},
	}

	agent := &agentGRPC{
		sandbox: &sandbox{
			containers: make(map[string]*container),
		},
	}

	// No container registered yet: getContainer must fail.
	resp, err := agent.UpdateContainer(context.TODO(), req)
	assert.Error(err)
	assert.Equal(emptyResp, resp)

	// Register a mock container and retry: the update must succeed.
	agent.sandbox.containers[cid] = &container{
		container: &mockContainer{
			id:        cid,
			processes: []int{1},
		},
	}

	resp, err = agent.UpdateContainer(context.TODO(), req)
	assert.NoError(err)
	assert.Equal(emptyResp, resp)
}
8 changes: 7 additions & 1 deletion mockcontainer.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,13 @@ func (m *mockContainer) State() (*libcontainer.State, error) {
}

// Config returns a static libcontainer configuration whose Capabilities,
// Cgroups.Resources and Seccomp members are non-nil, so callers that
// update those fields (e.g. UpdateContainer) do not need nil checks.
// Note: the rendered diff left the removed `return configs.Config{}` line
// fused into this body as an unreachable duplicate return; it is dropped here.
func (m *mockContainer) Config() configs.Config {
	return configs.Config{
		Capabilities: &configs.Capabilities{},
		Cgroups: &configs.Cgroup{
			Resources: &configs.Resources{},
		},
		Seccomp: &configs.Seccomp{},
	}
}

func (m *mockContainer) Processes() ([]int, error) {
Expand Down
Loading

0 comments on commit 888dae0

Please sign in to comment.