commit 8c38c931b0
196 changed files with 15007 additions and 4165 deletions
.gitignore (vendored) | 1

@@ -1,4 +1,5 @@
 containerd/containerd
+containerd-shim/containerd-shim
 bin/
 ctr/ctr
 hack/benchmark

Makefile | 7

@@ -1,4 +1,4 @@
-BUILDTAGS=libcontainer
+BUILDTAGS=
 
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
@@ -13,7 +13,7 @@ DOCKER_RUN := docker run --rm -i $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
 
 export GOPATH:=$(CURDIR)/vendor:$(GOPATH)
 
-all: client daemon
+all: client daemon shim
 
 bin:
 	mkdir -p bin/
@@ -27,6 +27,9 @@ client: bin
 daemon: bin
 	cd containerd && go build -tags "$(BUILDTAGS)" -o ../bin/containerd
 
+shim: bin
+	cd containerd-shim && go build -tags "$(BUILDTAGS)" -o ../bin/containerd-shim
+
 dbuild:
 	@docker build --rm --force-rm -t "$(DOCKER_IMAGE)" .
 
@@ -2,15 +2,18 @@ package server
 
 import (
 	"errors"
+	"fmt"
 	"syscall"
+	"time"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/containerd/api/grpc/types"
 	"github.com/docker/containerd/runtime"
 	"github.com/docker/containerd/supervisor"
+	"github.com/opencontainers/runc/libcontainer"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/specs"
 	"golang.org/x/net/context"
 )
@@ -30,35 +33,39 @@ func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContaine
 	if c.BundlePath == "" {
 		return nil, errors.New("empty bundle path")
 	}
-	e := supervisor.NewEvent(supervisor.StartContainerEventType)
+	e := supervisor.NewTask(supervisor.StartContainerTaskType)
 	e.ID = c.Id
 	e.BundlePath = c.BundlePath
+	e.Stdin = c.Stdin
 	e.Stdout = c.Stdout
 	e.Stderr = c.Stderr
-	e.Stdin = c.Stdin
-	e.Console = c.Console
+	e.Labels = c.Labels
 	e.StartResponse = make(chan supervisor.StartResponse, 1)
 	if c.Checkpoint != "" {
 		e.Checkpoint = &runtime.Checkpoint{
 			Name: c.Checkpoint,
 		}
 	}
-	s.sv.SendEvent(e)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
-	sr := <-e.StartResponse
+	r := <-e.StartResponse
+	apiC, err := createAPIContainer(r.Container, false)
+	if err != nil {
+		return nil, err
+	}
 	return &types.CreateContainerResponse{
-		Pid: uint32(sr.Pid),
+		Container: apiC,
 	}, nil
 }
 
 func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) {
-	e := supervisor.NewEvent(supervisor.SignalEventType)
+	e := supervisor.NewTask(supervisor.SignalTaskType)
 	e.ID = r.Id
-	e.Pid = int(r.Pid)
+	e.Pid = r.Pid
 	e.Signal = syscall.Signal(int(r.Signal))
-	s.sv.SendEvent(e)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
@@ -77,22 +84,30 @@ func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest)
 			AdditionalGids: r.User.AdditionalGids,
 		},
 	}
-	e := supervisor.NewEvent(supervisor.AddProcessEventType)
+	if r.Id == "" {
+		return nil, fmt.Errorf("container id cannot be empty")
+	}
+	if r.Pid == "" {
+		return nil, fmt.Errorf("process id cannot be empty")
+	}
+	e := supervisor.NewTask(supervisor.AddProcessTaskType)
 	e.ID = r.Id
-	e.Process = process
-	e.Console = r.Console
+	e.Pid = r.Pid
+	e.ProcessSpec = process
 	e.Stdin = r.Stdin
 	e.Stdout = r.Stdout
 	e.Stderr = r.Stderr
-	s.sv.SendEvent(e)
+	e.StartResponse = make(chan supervisor.StartResponse, 1)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
-	return &types.AddProcessResponse{Pid: uint32(e.Pid)}, nil
+	<-e.StartResponse
+	return &types.AddProcessResponse{}, nil
 }
 
 func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) {
-	e := supervisor.NewEvent(supervisor.CreateCheckpointEventType)
+	e := supervisor.NewTask(supervisor.CreateCheckpointTaskType)
 	e.ID = r.Id
 	e.Checkpoint = &runtime.Checkpoint{
 		Name: r.Checkpoint.Name,
@@ -101,7 +116,7 @@ func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpo
 		UnixSockets: r.Checkpoint.UnixSockets,
 		Shell: r.Checkpoint.Shell,
 	}
-	s.sv.SendEvent(e)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
@@ -112,12 +127,12 @@ func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpo
 	if r.Name == "" {
 		return nil, errors.New("checkpoint name cannot be empty")
 	}
-	e := supervisor.NewEvent(supervisor.DeleteCheckpointEventType)
+	e := supervisor.NewTask(supervisor.DeleteCheckpointTaskType)
 	e.ID = r.Id
 	e.Checkpoint = &runtime.Checkpoint{
 		Name: r.Name,
 	}
-	s.sv.SendEvent(e)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
@@ -125,8 +140,8 @@ func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpo
 }
 
 func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) {
-	e := supervisor.NewEvent(supervisor.GetContainerEventType)
-	s.sv.SendEvent(e)
+	e := supervisor.NewTask(supervisor.GetContainerTaskType)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
@@ -140,11 +155,11 @@ func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointR
 	if container == nil {
 		return nil, grpc.Errorf(codes.NotFound, "no such containers")
 	}
+	var out []*types.Checkpoint
 	checkpoints, err := container.Checkpoints()
 	if err != nil {
 		return nil, err
 	}
-	var out []*types.Checkpoint
 	for _, c := range checkpoints {
 		out = append(out, &types.Checkpoint{
 			Name: c.Name,
@@ -159,129 +174,206 @@ func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointR
 }
 
 func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) {
-	e := supervisor.NewEvent(supervisor.GetContainerEventType)
-	s.sv.SendEvent(e)
+	e := supervisor.NewTask(supervisor.GetContainerTaskType)
+	e.ID = r.Id
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
 	m := s.sv.Machine()
 	state := &types.StateResponse{
 		Machine: &types.Machine{
-			Id: m.ID,
 			Cpus: uint32(m.Cpus),
-			Memory: uint64(m.Cpus),
+			Memory: uint64(m.Memory),
 		},
 	}
 	for _, c := range e.Containers {
-		processes, err := c.Processes()
+		apiC, err := createAPIContainer(c, true)
 		if err != nil {
-			return nil, grpc.Errorf(codes.Internal, "get processes for container")
+			return nil, err
 		}
-		var procs []*types.Process
-		for _, p := range processes {
-			pid, err := p.Pid()
-			if err != nil {
-				logrus.WithField("error", err).Error("get process pid")
-			}
-			oldProc := p.Spec()
-			procs = append(procs, &types.Process{
-				Pid: uint32(pid),
-				Terminal: oldProc.Terminal,
-				Args: oldProc.Args,
-				Env: oldProc.Env,
-				Cwd: oldProc.Cwd,
-				User: &types.User{
-					Uid: oldProc.User.UID,
-					Gid: oldProc.User.GID,
-					AdditionalGids: oldProc.User.AdditionalGids,
-				},
-			})
-		}
-		state.Containers = append(state.Containers, &types.Container{
-			Id: c.ID(),
-			BundlePath: c.Path(),
-			Processes: procs,
-			Status: string(c.State()),
-		})
+		state.Containers = append(state.Containers, apiC)
 	}
 	return state, nil
 }
 
-func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) {
-	e := supervisor.NewEvent(supervisor.UpdateContainerEventType)
-	e.ID = r.Id
-	if r.Signal != 0 {
-		e.Signal = syscall.Signal(r.Signal)
+func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) {
+	processes, err := c.Processes()
+	if err != nil {
+		return nil, grpc.Errorf(codes.Internal, "get processes for container")
 	}
+	var procs []*types.Process
+	for _, p := range processes {
+		oldProc := p.Spec()
+		stdio := p.Stdio()
+		procs = append(procs, &types.Process{
+			Pid: p.ID(),
+			SystemPid: uint32(p.SystemPid()),
+			Terminal: oldProc.Terminal,
+			Args: oldProc.Args,
+			Env: oldProc.Env,
+			Cwd: oldProc.Cwd,
+			Stdin: stdio.Stdin,
+			Stdout: stdio.Stdout,
+			Stderr: stdio.Stderr,
+			User: &types.User{
+				Uid: oldProc.User.UID,
+				Gid: oldProc.User.GID,
+				AdditionalGids: oldProc.User.AdditionalGids,
+			},
+		})
+	}
+	var pids []int
+	if getPids {
+		if pids, err = c.Pids(); err != nil {
+			return nil, grpc.Errorf(codes.Internal, "get all pids for container")
+		}
+	}
+	return &types.Container{
+		Id: c.ID(),
+		BundlePath: c.Path(),
+		Processes: procs,
+		Labels: c.Labels(),
+		Status: string(c.State()),
+		Pids: toUint32(pids),
+	}, nil
+}
+
+func toUint32(its []int) []uint32 {
+	o := []uint32{}
+	for _, i := range its {
+		o = append(o, uint32(i))
+	}
+	return o
+}
+
+func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) {
+	e := supervisor.NewTask(supervisor.UpdateContainerTaskType)
+	e.ID = r.Id
 	e.State = runtime.State(r.Status)
-	s.sv.SendEvent(e)
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
 		return nil, err
 	}
 	return &types.UpdateContainerResponse{}, nil
 }
 
-func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error {
-	events := s.sv.Events()
-	defer s.sv.Unsubscribe(events)
-	for evt := range events {
-		var ev *types.Event
-		switch evt.Type {
-		case supervisor.ExitEventType, supervisor.ExecExitEventType:
-			ev = &types.Event{
-				Type: "exit",
-				Id: evt.ID,
-				Pid: uint32(evt.Pid),
-				Status: uint32(evt.Status),
-			}
-		case supervisor.OOMEventType:
-			ev = &types.Event{
-				Type: "oom",
-				Id: evt.ID,
-			}
-		}
-		if ev != nil {
-			if err := stream.Send(ev); err != nil {
-				return err
-			}
-		}
-
-	}
-	return nil
-}
-
-func (s *apiServer) GetStats(r *types.StatsRequest, stream types.API_GetStatsServer) error {
-	e := supervisor.NewEvent(supervisor.StatsEventType)
+func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) {
+	e := supervisor.NewTask(supervisor.UpdateProcessTaskType)
 	e.ID = r.Id
-	s.sv.SendEvent(e)
+	e.Pid = r.Pid
+	e.Height = int(r.Height)
+	e.Width = int(r.Width)
+	e.CloseStdin = r.CloseStdin
+	s.sv.SendTask(e)
 	if err := <-e.Err; err != nil {
-		if err == supervisor.ErrContainerNotFound {
-			return grpc.Errorf(codes.NotFound, err.Error())
-		}
-		return err
+		return nil, err
 	}
-	defer func() {
-		ue := supervisor.NewEvent(supervisor.UnsubscribeStatsEventType)
-		ue.ID = e.ID
-		ue.Stats = e.Stats
-		s.sv.SendEvent(ue)
-		if err := <-ue.Err; err != nil {
-			logrus.Errorf("Error unsubscribing %s: %v", r.Id, err)
-		}
+	return &types.UpdateProcessResponse{}, nil
+}
+func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error {
+	t := time.Time{}
+	if r.Timestamp != 0 {
+		t = time.Unix(int64(r.Timestamp), 0)
 	}
-	}()
-	for {
-		select {
-		case st := <-e.Stats:
-			pbSt, ok := st.(*types.Stats)
-			if !ok {
-				panic("invalid stats type from collector")
-			}
-			if err := stream.Send(pbSt); err != nil {
-				return err
-			}
-		case <-stream.Context().Done():
-			return nil
+	events := s.sv.Events(t)
+	defer s.sv.Unsubscribe(events)
+	for e := range events {
+		if err := stream.Send(&types.Event{
+			Id: e.ID,
+			Type: e.Type,
+			Timestamp: uint64(e.Timestamp.Unix()),
+			Pid: e.Pid,
+			Status: uint32(e.Status),
+		}); err != nil {
+			return err
 		}
 	}
 	return nil
 }
+
+func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) {
+	e := supervisor.NewTask(supervisor.StatsTaskType)
+	e.ID = r.Id
+	e.Stat = make(chan *runtime.Stat, 1)
+	s.sv.SendTask(e)
+	if err := <-e.Err; err != nil {
+		return nil, err
+	}
+	stats := <-e.Stat
+	t := convertToPb(stats)
+	return t, nil
+}
+
+func convertToPb(st *runtime.Stat) *types.StatsResponse {
+	pbSt := &types.StatsResponse{
+		Timestamp: uint64(st.Timestamp.Unix()),
+		CgroupStats: &types.CgroupStats{},
+	}
+	lcSt, ok := st.Data.(*libcontainer.Stats)
+	if !ok {
+		return pbSt
+	}
+	cpuSt := lcSt.CgroupStats.CpuStats
+	pbSt.CgroupStats.CpuStats = &types.CpuStats{
+		CpuUsage: &types.CpuUsage{
+			TotalUsage: cpuSt.CpuUsage.TotalUsage,
+			PercpuUsage: cpuSt.CpuUsage.PercpuUsage,
+			UsageInKernelmode: cpuSt.CpuUsage.UsageInKernelmode,
+			UsageInUsermode: cpuSt.CpuUsage.UsageInUsermode,
+		},
+		ThrottlingData: &types.ThrottlingData{
+			Periods: cpuSt.ThrottlingData.Periods,
+			ThrottledPeriods: cpuSt.ThrottlingData.ThrottledPeriods,
+			ThrottledTime: cpuSt.ThrottlingData.ThrottledTime,
+		},
+	}
+	memSt := lcSt.CgroupStats.MemoryStats
+	pbSt.CgroupStats.MemoryStats = &types.MemoryStats{
+		Cache: memSt.Cache,
+		Usage: &types.MemoryData{
+			Usage: memSt.Usage.Usage,
+			MaxUsage: memSt.Usage.MaxUsage,
+			Failcnt: memSt.Usage.Failcnt,
+		},
+		SwapUsage: &types.MemoryData{
+			Usage: memSt.SwapUsage.Usage,
+			MaxUsage: memSt.SwapUsage.MaxUsage,
+			Failcnt: memSt.SwapUsage.Failcnt,
+		},
+	}
+	blkSt := lcSt.CgroupStats.BlkioStats
+	pbSt.CgroupStats.BlkioStats = &types.BlkioStats{
+		IoServiceBytesRecursive: convertBlkioEntryToPb(blkSt.IoServiceBytesRecursive),
+		IoServicedRecursive: convertBlkioEntryToPb(blkSt.IoServicedRecursive),
+		IoQueuedRecursive: convertBlkioEntryToPb(blkSt.IoQueuedRecursive),
+		IoServiceTimeRecursive: convertBlkioEntryToPb(blkSt.IoServiceTimeRecursive),
+		IoWaitTimeRecursive: convertBlkioEntryToPb(blkSt.IoWaitTimeRecursive),
+		IoMergedRecursive: convertBlkioEntryToPb(blkSt.IoMergedRecursive),
+		IoTimeRecursive: convertBlkioEntryToPb(blkSt.IoTimeRecursive),
+		SectorsRecursive: convertBlkioEntryToPb(blkSt.SectorsRecursive),
+	}
+	pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats)
+	for k, st := range lcSt.CgroupStats.HugetlbStats {
+		pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{
+			Usage: st.Usage,
+			MaxUsage: st.MaxUsage,
+			Failcnt: st.Failcnt,
+		}
+	}
+	return pbSt
+}
+
+func convertBlkioEntryToPb(b []cgroups.BlkioStatEntry) []*types.BlkioStatsEntry {
+	var pbEs []*types.BlkioStatsEntry
+	for _, e := range b {
+		pbEs = append(pbEs, &types.BlkioStatsEntry{
+			Major: e.Major,
+			Minor: e.Minor,
+			Op: e.Op,
+			Value: e.Value,
+		})
+	}
+	return pbEs
+}
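Every handler in the server hunks above now follows the same round trip: build a supervisor task with NewTask, fill in its fields, hand it to the supervisor with SendTask, and block on the task's Err channel (start-like calls additionally wait on StartResponse). The following is a self-contained toy model of that pattern, for illustration only; every name in it is invented here and none of it is the real containerd supervisor API.

package main

import (
	"errors"
	"fmt"
)

// task mirrors the shape used above: an ID plus a reply channel the
// supervisor answers on. (Illustrative only; not the real supervisor.Task.)
type task struct {
	ID  string
	Err chan error
}

// supervisorLoop stands in for the single goroutine that owns all state.
type supervisorLoop struct {
	tasks chan *task
}

func (s *supervisorLoop) SendTask(t *task) { s.tasks <- t }

func (s *supervisorLoop) run() {
	for t := range s.tasks {
		if t.ID == "" {
			t.Err <- errors.New("empty container id")
			continue
		}
		// Real work would happen here; the reply unblocks the gRPC handler.
		t.Err <- nil
	}
}

func main() {
	sv := &supervisorLoop{tasks: make(chan *task, 16)}
	go sv.run()

	t := &task{ID: "redis", Err: make(chan error, 1)}
	sv.SendTask(t)
	if err := <-t.Err; err != nil {
		fmt.Println("task failed:", err)
		return
	}
	fmt.Println("task completed")
}

The same pattern explains why the diff adds StartResponse to AddProcess and a Stat channel to the new Stats handler: each task carries its own reply channels instead of sharing a global event stream.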
@@ -9,6 +9,8 @@ It is generated from these files:
 	api.proto
 
 It has these top-level messages:
+	UpdateProcessRequest
+	UpdateProcessResponse
 	CreateContainerRequest
 	CreateContainerResponse
 	SignalRequest
@@ -43,7 +45,7 @@ It has these top-level messages:
 	BlkioStats
 	HugetlbStats
 	CgroupStats
-	Stats
+	StatsResponse
 	StatsRequest
 */
 package types
@@ -62,40 +64,68 @@ var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
-type CreateContainerRequest struct {
+type UpdateProcessRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Stdin string `protobuf:"bytes,3,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout string `protobuf:"bytes,4,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr string `protobuf:"bytes,5,opt,name=stderr" json:"stderr,omitempty"`
-	Console string `protobuf:"bytes,6,opt,name=console" json:"console,omitempty"`
-	Checkpoint string `protobuf:"bytes,7,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
+	CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"`
+	Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"`
+	Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"`
+}
+
+func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} }
+func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateProcessRequest) ProtoMessage() {}
+func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type UpdateProcessResponse struct {
+}
+
+func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} }
+func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) }
+func (*UpdateProcessResponse) ProtoMessage() {}
+func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type CreateContainerRequest struct {
+	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
+	Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
+	Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
+	Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
+	Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
 }
 
 func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} }
 func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) }
 func (*CreateContainerRequest) ProtoMessage() {}
-func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
 
 type CreateContainerResponse struct {
-	Pid uint32 `protobuf:"varint,1,opt,name=pid" json:"pid,omitempty"`
+	Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"`
 }
 
 func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} }
 func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) }
 func (*CreateContainerResponse) ProtoMessage() {}
-func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 
+func (m *CreateContainerResponse) GetContainer() *Container {
+	if m != nil {
+		return m.Container
+	}
+	return nil
+}
+
 type SignalRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Pid uint32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+	Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
 	Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"`
 }
 
 func (m *SignalRequest) Reset() { *m = SignalRequest{} }
 func (m *SignalRequest) String() string { return proto.CompactTextString(m) }
 func (*SignalRequest) ProtoMessage() {}
-func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
 
 type SignalResponse struct {
 }
@@ -103,7 +133,7 @@ type SignalResponse struct {
 func (m *SignalResponse) Reset() { *m = SignalResponse{} }
 func (m *SignalResponse) String() string { return proto.CompactTextString(m) }
 func (*SignalResponse) ProtoMessage() {}
-func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
 
 type AddProcessRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
@@ -112,16 +142,16 @@ type AddProcessRequest struct {
 	Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
 	Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
 	Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"`
-	Stdin string `protobuf:"bytes,7,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout string `protobuf:"bytes,8,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr string `protobuf:"bytes,9,opt,name=stderr" json:"stderr,omitempty"`
-	Console string `protobuf:"bytes,10,opt,name=console" json:"console,omitempty"`
+	Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"`
+	Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"`
+	Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"`
+	Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"`
 }
 
 func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} }
 func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) }
 func (*AddProcessRequest) ProtoMessage() {}
-func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
 
 func (m *AddProcessRequest) GetUser() *User {
 	if m != nil {
@@ -139,16 +169,15 @@ type User struct {
 func (m *User) Reset() { *m = User{} }
 func (m *User) String() string { return proto.CompactTextString(m) }
 func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
 
 type AddProcessResponse struct {
-	Pid uint32 `protobuf:"varint,1,opt,name=pid" json:"pid,omitempty"`
 }
 
 func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} }
 func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) }
 func (*AddProcessResponse) ProtoMessage() {}
-func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
 
 type CreateCheckpointRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
@@ -158,7 +187,7 @@ type CreateCheckpointRequest struct {
 func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} }
 func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) }
 func (*CreateCheckpointRequest) ProtoMessage() {}
-func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
 
 func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint {
 	if m != nil {
@@ -173,7 +202,7 @@ type CreateCheckpointResponse struct {
 func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} }
 func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) }
 func (*CreateCheckpointResponse) ProtoMessage() {}
-func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
 
 type DeleteCheckpointRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
@@ -183,7 +212,7 @@ type DeleteCheckpointRequest struct {
 func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} }
 func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) }
 func (*DeleteCheckpointRequest) ProtoMessage() {}
-func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
 
 type DeleteCheckpointResponse struct {
 }
@@ -191,7 +220,7 @@ type DeleteCheckpointResponse struct {
 func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} }
 func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) }
 func (*DeleteCheckpointResponse) ProtoMessage() {}
-func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
 
 type ListCheckpointRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
@@ -200,7 +229,7 @@ type ListCheckpointRequest struct {
 func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} }
 func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) }
 func (*ListCheckpointRequest) ProtoMessage() {}
-func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
 
 type Checkpoint struct {
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
@@ -213,7 +242,7 @@ type Checkpoint struct {
 func (m *Checkpoint) Reset() { *m = Checkpoint{} }
 func (m *Checkpoint) String() string { return proto.CompactTextString(m) }
 func (*Checkpoint) ProtoMessage() {}
-func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
 
 type ListCheckpointResponse struct {
 	Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"`
@@ -222,7 +251,7 @@ type ListCheckpointResponse struct {
 func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} }
 func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) }
 func (*ListCheckpointResponse) ProtoMessage() {}
-func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
 
 func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint {
 	if m != nil {
@@ -232,12 +261,13 @@ func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint {
 }
 
 type StateRequest struct {
+	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
 }
 
 func (m *StateRequest) Reset() { *m = StateRequest{} }
 func (m *StateRequest) String() string { return proto.CompactTextString(m) }
 func (*StateRequest) ProtoMessage() {}
-func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
 
 type ContainerState struct {
 	Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
@@ -246,21 +276,25 @@
 func (m *ContainerState) Reset() { *m = ContainerState{} }
 func (m *ContainerState) String() string { return proto.CompactTextString(m) }
 func (*ContainerState) ProtoMessage() {}
-func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
 
 type Process struct {
-	Pid uint32 `protobuf:"varint,1,opt,name=pid" json:"pid,omitempty"`
+	Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"`
 	Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"`
 	User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"`
 	Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
 	Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
 	Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"`
+	SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"`
+	Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"`
+	Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"`
+	Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"`
 }
 
 func (m *Process) Reset() { *m = Process{} }
 func (m *Process) String() string { return proto.CompactTextString(m) }
 func (*Process) ProtoMessage() {}
-func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
 
 func (m *Process) GetUser() *User {
 	if m != nil {
@@ -271,16 +305,17 @@ func (m *Process) GetUser() *User {
 
 type Container struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
-	BundlePath string `protobuf:"bytes,3,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Processes []*Process `protobuf:"bytes,4,rep,name=processes" json:"processes,omitempty"`
-	Status string `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"`
+	BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
+	Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"`
+	Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+	Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
+	Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"`
 }
 
 func (m *Container) Reset() { *m = Container{} }
 func (m *Container) String() string { return proto.CompactTextString(m) }
 func (*Container) ProtoMessage() {}
-func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
 
 func (m *Container) GetProcesses() []*Process {
 	if m != nil {
@@ -291,15 +326,14 @@ func (m *Container) GetProcesses() []*Process {
 
 // Machine is information about machine on which containerd is run
 type Machine struct {
-	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Cpus uint32 `protobuf:"varint,2,opt,name=cpus" json:"cpus,omitempty"`
-	Memory uint64 `protobuf:"varint,3,opt,name=memory" json:"memory,omitempty"`
+	Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"`
+	Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"`
 }
 
 func (m *Machine) Reset() { *m = Machine{} }
 func (m *Machine) String() string { return proto.CompactTextString(m) }
 func (*Machine) ProtoMessage() {}
-func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
 
 // StateResponse is information about containerd daemon
 type StateResponse struct {
@@ -310,7 +344,7 @@ type StateResponse struct {
 func (m *StateResponse) Reset() { *m = StateResponse{} }
 func (m *StateResponse) String() string { return proto.CompactTextString(m) }
 func (*StateResponse) ProtoMessage() {}
-func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
 
 func (m *StateResponse) GetContainers() []*Container {
 	if m != nil {
@@ -328,14 +362,14 @@ func (m *StateResponse) GetMachine() *Machine {
 
 type UpdateContainerRequest struct {
 	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Signal uint32 `protobuf:"varint,2,opt,name=signal" json:"signal,omitempty"`
+	Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
 	Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"`
 }
 
 func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} }
 func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) }
 func (*UpdateContainerRequest) ProtoMessage() {}
-func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
 
 type UpdateContainerResponse struct {
 }
@@ -343,53 +377,29 @@ type UpdateContainerResponse struct {
 func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} }
 func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) }
 func (*UpdateContainerResponse) ProtoMessage() {}
-func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
 
 type EventsRequest struct {
+	Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
 }
 
 func (m *EventsRequest) Reset() { *m = EventsRequest{} }
 func (m *EventsRequest) String() string { return proto.CompactTextString(m) }
 func (*EventsRequest) ProtoMessage() {}
-func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
 
 type Event struct {
 	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
 	Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
 	Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"`
-	BundlePath string `protobuf:"bytes,4,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Pid uint32 `protobuf:"varint,5,opt,name=pid" json:"pid,omitempty"`
-	Signal uint32 `protobuf:"varint,7,opt,name=signal" json:"signal,omitempty"`
-	Process *Process `protobuf:"bytes,8,opt,name=process" json:"process,omitempty"`
-	Containers []*Container `protobuf:"bytes,9,rep,name=containers" json:"containers,omitempty"`
-	Checkpoint *Checkpoint `protobuf:"bytes,10,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"`
+	Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"`
 }
 
 func (m *Event) Reset() { *m = Event{} }
 func (m *Event) String() string { return proto.CompactTextString(m) }
 func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
 
-func (m *Event) GetProcess() *Process {
-	if m != nil {
-		return m.Process
-	}
-	return nil
-}
-
-func (m *Event) GetContainers() []*Container {
-	if m != nil {
-		return m.Containers
-	}
-	return nil
-}
-
-func (m *Event) GetCheckpoint() *Checkpoint {
-	if m != nil {
-		return m.Checkpoint
-	}
-	return nil
-}
-
 type NetworkStats struct {
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
@@ -406,7 +416,7 @@ type NetworkStats struct {
 func (m *NetworkStats) Reset() { *m = NetworkStats{} }
 func (m *NetworkStats) String() string { return proto.CompactTextString(m) }
 func (*NetworkStats) ProtoMessage() {}
-func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
 
 type CpuUsage struct {
 	TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"`
@@ -418,7 +428,7 @@ type CpuUsage struct {
 func (m *CpuUsage) Reset() { *m = CpuUsage{} }
 func (m *CpuUsage) String() string { return proto.CompactTextString(m) }
 func (*CpuUsage) ProtoMessage() {}
-func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
 
 type ThrottlingData struct {
 	Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"`
@@ -429,7 +439,7 @@ type ThrottlingData struct {
 func (m *ThrottlingData) Reset() { *m = ThrottlingData{} }
 func (m *ThrottlingData) String() string { return proto.CompactTextString(m) }
 func (*ThrottlingData) ProtoMessage() {}
-func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
 
 type CpuStats struct {
 	CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"`
@@ -439,7 +449,7 @@ type CpuStats struct {
 func (m *CpuStats) Reset() { *m = CpuStats{} }
 func (m *CpuStats) String() string { return proto.CompactTextString(m) }
 func (*CpuStats) ProtoMessage() {}
-func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
 
 func (m *CpuStats) GetCpuUsage() *CpuUsage {
 	if m != nil {
@@ -464,7 +474,7 @@ type MemoryData struct {
 func (m *MemoryData) Reset() { *m = MemoryData{} }
 func (m *MemoryData) String() string { return proto.CompactTextString(m) }
 func (*MemoryData) ProtoMessage() {}
-func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
 
 type MemoryStats struct {
 	Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"`
@@ -477,7 +487,7 @@ type MemoryStats struct {
 func (m *MemoryStats) Reset() { *m = MemoryStats{} }
 func (m *MemoryStats) String() string { return proto.CompactTextString(m) }
 func (*MemoryStats) ProtoMessage() {}
-func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
 
 func (m *MemoryStats) GetUsage() *MemoryData {
 	if m != nil {
@@ -517,7 +527,7 @@ type BlkioStatsEntry struct {
 func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} }
 func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) }
 func (*BlkioStatsEntry) ProtoMessage() {}
-func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
 
 type BlkioStats struct {
 	IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"`
@@ -533,7 +543,7 @@ type BlkioStats struct {
 func (m *BlkioStats) Reset() { *m = BlkioStats{} }
 func (m *BlkioStats) String() string { return proto.CompactTextString(m) }
 func (*BlkioStats) ProtoMessage() {}
-func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
 
 func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry {
 	if m != nil {
@@ -600,7 +610,7 @@ type HugetlbStats struct {
 func (m *HugetlbStats) Reset() { *m = HugetlbStats{} }
 func (m *HugetlbStats) String() string { return proto.CompactTextString(m) }
 func (*HugetlbStats) ProtoMessage() {}
-func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
 
 type CgroupStats struct {
 	CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats" json:"cpu_stats,omitempty"`
@@ -612,7 +622,7 @@ type CgroupStats struct {
 func (m *CgroupStats) Reset() { *m = CgroupStats{} }
 func (m *CgroupStats) String() string { return proto.CompactTextString(m) }
 func (*CgroupStats) ProtoMessage() {}
-func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
 
 func (m *CgroupStats) GetCpuStats() *CpuStats {
 	if m != nil {
@@ -642,25 +652,25 @@ func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats {
 	return nil
 }
 
-type Stats struct {
+type StatsResponse struct {
 	NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"`
 	CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"`
 	Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"`
 }
 
-func (m *Stats) Reset() { *m = Stats{} }
+func (m *StatsResponse) Reset() { *m = StatsResponse{} }
func (m *Stats) String() string { return proto.CompactTextString(m) }
|
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Stats) ProtoMessage() {}
|
func (*StatsResponse) ProtoMessage() {}
|
||||||
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
|
func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
|
||||||
|
|
||||||
func (m *Stats) GetNetworkStats() []*NetworkStats {
|
func (m *StatsResponse) GetNetworkStats() []*NetworkStats {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.NetworkStats
|
return m.NetworkStats
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Stats) GetCgroupStats() *CgroupStats {
|
func (m *StatsResponse) GetCgroupStats() *CgroupStats {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
return m.CgroupStats
|
return m.CgroupStats
|
||||||
}
|
}
|
||||||
|
@ -674,9 +684,11 @@ type StatsRequest struct {
|
||||||
func (m *StatsRequest) Reset() { *m = StatsRequest{} }
|
func (m *StatsRequest) Reset() { *m = StatsRequest{} }
|
||||||
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
|
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*StatsRequest) ProtoMessage() {}
|
func (*StatsRequest) ProtoMessage() {}
|
||||||
func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
|
func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest")
|
||||||
|
proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse")
|
||||||
proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest")
|
proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest")
|
||||||
proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse")
|
proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse")
|
||||||
proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest")
|
proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest")
|
||||||
|
@ -711,7 +723,7 @@ func init() {
|
||||||
proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats")
|
proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats")
|
||||||
proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats")
|
proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats")
|
||||||
proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats")
|
proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats")
|
||||||
proto.RegisterType((*Stats)(nil), "types.Stats")
|
proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse")
|
||||||
proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest")
|
proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -725,13 +737,14 @@ type APIClient interface {
|
||||||
CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
|
CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
|
||||||
UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
|
UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
|
||||||
Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error)
|
Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error)
|
||||||
|
UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error)
|
||||||
AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error)
|
AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error)
|
||||||
CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error)
|
CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error)
|
||||||
DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error)
|
DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error)
|
||||||
ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error)
|
ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error)
|
||||||
State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
|
State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
|
||||||
Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error)
|
Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error)
|
||||||
GetStats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (API_GetStatsClient, error)
|
Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type aPIClient struct {
|
type aPIClient struct {
|
||||||
|
@ -769,6 +782,15 @@ func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) {
|
||||||
|
out := new(UpdateProcessResponse)
|
||||||
|
err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) {
|
func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) {
|
||||||
out := new(AddProcessResponse)
|
out := new(AddProcessResponse)
|
||||||
err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...)
|
err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...)
|
||||||
|
@ -846,36 +868,13 @@ func (x *aPIEventsClient) Recv() (*Event, error) {
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *aPIClient) GetStats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (API_GetStatsClient, error) {
|
func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) {
|
||||||
stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[1], c.cc, "/types.API/GetStats", opts...)
|
out := new(StatsResponse)
|
||||||
|
err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
x := &aPIGetStatsClient{stream}
|
return out, nil
|
||||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := x.ClientStream.CloseSend(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type API_GetStatsClient interface {
|
|
||||||
Recv() (*Stats, error)
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type aPIGetStatsClient struct {
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *aPIGetStatsClient) Recv() (*Stats, error) {
|
|
||||||
m := new(Stats)
|
|
||||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server API for API service
|
// Server API for API service
|
||||||
|
@ -884,13 +883,14 @@ type APIServer interface {
|
||||||
CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
|
CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
|
||||||
UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
|
UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
|
||||||
Signal(context.Context, *SignalRequest) (*SignalResponse, error)
|
Signal(context.Context, *SignalRequest) (*SignalResponse, error)
|
||||||
|
UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error)
|
||||||
AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error)
|
AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error)
|
||||||
CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error)
|
CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error)
|
||||||
DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error)
|
DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error)
|
||||||
ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error)
|
ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error)
|
||||||
State(context.Context, *StateRequest) (*StateResponse, error)
|
State(context.Context, *StateRequest) (*StateResponse, error)
|
||||||
Events(*EventsRequest, API_EventsServer) error
|
Events(*EventsRequest, API_EventsServer) error
|
||||||
GetStats(*StatsRequest, API_GetStatsServer) error
|
Stats(context.Context, *StatsRequest) (*StatsResponse, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
|
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
|
||||||
|
@ -933,6 +933,18 @@ func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interfac
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||||
|
in := new(UpdateProcessRequest)
|
||||||
|
if err := dec(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out, err := srv.(APIServer).UpdateProcess(ctx, in)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||||
in := new(AddProcessRequest)
|
in := new(AddProcessRequest)
|
||||||
if err := dec(in); err != nil {
|
if err := dec(in); err != nil {
|
||||||
|
@ -1014,25 +1026,16 @@ func (x *aPIEventsServer) Send(m *Event) error {
|
||||||
return x.ServerStream.SendMsg(m)
|
return x.ServerStream.SendMsg(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func _API_GetStats_Handler(srv interface{}, stream grpc.ServerStream) error {
|
func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||||
m := new(StatsRequest)
|
in := new(StatsRequest)
|
||||||
if err := stream.RecvMsg(m); err != nil {
|
if err := dec(in); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
return srv.(APIServer).GetStats(m, &aPIGetStatsServer{stream})
|
out, err := srv.(APIServer).Stats(ctx, in)
|
||||||
}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
type API_GetStatsServer interface {
|
}
|
||||||
Send(*Stats) error
|
return out, nil
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type aPIGetStatsServer struct {
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *aPIGetStatsServer) Send(m *Stats) error {
|
|
||||||
return x.ServerStream.SendMsg(m)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _API_serviceDesc = grpc.ServiceDesc{
|
var _API_serviceDesc = grpc.ServiceDesc{
|
||||||
|
@ -1051,6 +1054,10 @@ var _API_serviceDesc = grpc.ServiceDesc{
|
||||||
MethodName: "Signal",
|
MethodName: "Signal",
|
||||||
Handler: _API_Signal_Handler,
|
Handler: _API_Signal_Handler,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
MethodName: "UpdateProcess",
|
||||||
|
Handler: _API_UpdateProcess_Handler,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
MethodName: "AddProcess",
|
MethodName: "AddProcess",
|
||||||
Handler: _API_AddProcess_Handler,
|
Handler: _API_AddProcess_Handler,
|
||||||
|
@ -1071,6 +1078,10 @@ var _API_serviceDesc = grpc.ServiceDesc{
|
||||||
MethodName: "State",
|
MethodName: "State",
|
||||||
Handler: _API_State_Handler,
|
Handler: _API_State_Handler,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
MethodName: "Stats",
|
||||||
|
Handler: _API_Stats_Handler,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{
|
Streams: []grpc.StreamDesc{
|
||||||
{
|
{
|
||||||
|
@ -1078,105 +1089,104 @@ var _API_serviceDesc = grpc.ServiceDesc{
|
||||||
Handler: _API_Events_Handler,
|
Handler: _API_Events_Handler,
|
||||||
ServerStreams: true,
|
ServerStreams: true,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
StreamName: "GetStats",
|
|
||||||
Handler: _API_GetStats_Handler,
|
|
||||||
ServerStreams: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor0 = []byte{
|
||||||
-	// 1454 bytes of a gzipped FileDescriptorProto
+	// 1517 bytes of a gzipped FileDescriptorProto
 	// (regenerated gzipped descriptor bytes omitted)
 }
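The net effect of the generated-code changes above is that stats are fetched with a single unary round trip instead of a server stream, and a new UpdateProcess call is exposed. The sketch below is illustrative only: it assumes the generated NewAPIClient constructor, a StatsRequest with an Id field, standard protoc-gen-go field names, and a unix-socket address, none of which appear in this hunk.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/containerd/api/grpc/types"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Socket path and dialer are assumptions for this sketch, not values taken from this commit.
	conn, err := grpc.Dial("/run/containerd/containerd.sock",
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	c := types.NewAPIClient(conn)

	// Stats is now a plain unary call: one StatsRequest in, one StatsResponse out.
	// The Id field on StatsRequest is an assumption based on the surrounding API.
	stats, err := c.Stats(context.Background(), &types.StatsRequest{Id: "my-container"})
	if err != nil {
		panic(err)
	}
	fmt.Println("collected at:", stats.Timestamp)

	// UpdateProcess covers per-process tweaks such as a terminal resize.
	// The "init" process name is likewise an assumption for illustration.
	_, err = c.UpdateProcess(context.Background(), &types.UpdateProcessRequest{
		Id:     "my-container",
		Pid:    "init",
		Width:  80,
		Height: 24,
	})
	if err != nil {
		panic(err)
	}
}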
@@ -6,32 +6,44 @@ service API {
	rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
	rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {}
	rpc Signal(SignalRequest) returns (SignalResponse) {}
+	rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {}
	rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {}
	rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {}
	rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {}
	rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {}
	rpc State(StateRequest) returns (StateResponse) {}
	rpc Events(EventsRequest) returns (stream Event) {}
-	rpc GetStats(StatsRequest) returns (stream Stats) {}
+	rpc Stats(StatsRequest) returns (StatsResponse) {}
+}
+
+message UpdateProcessRequest {
+	string id = 1;
+	string pid = 2;
+	bool closeStdin = 3; // Close stdin of the container
+	uint32 width = 4;
+	uint32 height = 5;
+}
+
+message UpdateProcessResponse {
 }

 message CreateContainerRequest {
 	string id = 1; // ID of container
 	string bundlePath = 2; // path to OCI bundle
-	string stdin = 3; // path to the file where stdin will be read (optional)
-	string stdout = 4; // path to file where stdout will be written (optional)
-	string stderr = 5; // path to file where stderr will be written (optional)
-	string console = 6; // path to the console for a container (optional)
-	string checkpoint = 7; // checkpoint name if you want to create immediate checkpoint (optional)
+	string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional)
+	string stdin = 4; // path to the file where stdin will be read (optional)
+	string stdout = 5; // path to file where stdout will be written (optional)
+	string stderr = 6; // path to file where stderr will be written (optional)
+	repeated string labels = 7;
 }

 message CreateContainerResponse {
-	uint32 pid = 1; // PID of the containers main process
+	Container container = 1;
 }

 message SignalRequest {
 	string id = 1; // ID of container
-	uint32 pid = 2; // PID of process inside container
+	string pid = 2; // PID of process inside container
 	uint32 signal = 3; // Signal which will be sent, you can find value in "man 7 signal"
 }

@@ -45,11 +57,11 @@ message AddProcessRequest {
 	repeated string args = 4; // Arguments for process, first is binary path itself
 	repeated string env = 5; // List of environment variables for process
 	string cwd = 6; // Working directory of process
-	string stdin = 7; // path to the file where stdin will be read (optional)
-	string stdout = 8; // path to file where stdout will be written (optional)
-	string stderr = 9; // path to file where stderr will be written (optional)
-	string console = 10; // path to the console for a container (optional)
-};
+	string pid = 7; // Process ID
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
+}

 message User {
 	uint32 uid = 1; // UID of user
@@ -58,7 +70,6 @@ message User {
 }

 message AddProcessResponse {
-	uint32 pid = 1; // PID of process is returned in case of success
 }

 message CreateCheckpointRequest {
@@ -94,6 +105,7 @@ message ListCheckpointResponse {
 }

 message StateRequest {
+	string id = 1; // container id for a single container
 }

 message ContainerState {
@@ -101,27 +113,31 @@ message ContainerState {
 }

 message Process {
-	uint32 pid = 1;
+	string pid = 1;
 	bool terminal = 2; // Use tty for container stdio
 	User user = 3; // User under which process will be run
 	repeated string args = 4; // Arguments for process, first is binary path itself
 	repeated string env = 5; // List of environment variables for process
 	string cwd = 6; // Working directory of process
+	uint32 systemPid = 7;
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
 }

 message Container {
 	string id = 1; // ID of container
-	string name = 2; // Name of container (???)
-	string bundlePath = 3; // Path to OCI bundle
-	repeated Process processes = 4; // List of processes which run in container
-	string status = 5; // Container status ("running", "paused", etc.)
+	string bundlePath = 2; // Path to OCI bundle
+	repeated Process processes = 3; // List of processes which run in container
+	string status = 4; // Container status ("running", "paused", etc.)
+	repeated string labels = 5;
+	repeated uint32 pids = 6;
 }

 // Machine is information about machine on which containerd is run
 message Machine {
-	string id = 1; // ID of machine
-	uint32 cpus = 2; // number of cpus
-	uint64 memory = 3; // amount of memory
+	uint32 cpus = 1; // number of cpus
+	uint64 memory = 2; // amount of memory
 }

 // StateResponse is information about containerd daemon
@@ -132,7 +148,7 @@ message StateResponse {

 message UpdateContainerRequest {
 	string id = 1; // ID of container
-	uint32 signal = 2; // Signal
+	string pid = 2;
 	string status = 3; // Status to which containerd will try to change
 }

@@ -140,18 +156,15 @@ message UpdateContainerResponse {
 }

 message EventsRequest {
+	uint64 timestamp = 1;
 }

 message Event {
 	string type = 1;
 	string id = 2;
 	uint32 status = 3;
-	string bundlePath = 4;
-	uint32 pid = 5;
-	uint32 signal = 7;
-	Process process = 8;
-	repeated Container containers = 9;
-	Checkpoint checkpoint = 10;
+	string pid = 4;
+	uint64 timestamp = 5;
 }

 message NetworkStats {
@@ -229,7 +242,7 @@ message CgroupStats {
 	map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage"
 }

-message Stats {
+message StatsResponse {
 	repeated NetworkStats network_stats = 1;
 	CgroupStats cgroup_stats = 2;
 	uint64 timestamp = 3;

0	containerd-shim/example/config.json	Normal file
0	containerd-shim/example/init/exit	Normal file
0	containerd-shim/example/init/pid	Normal file
0	containerd-shim/example/init/process.json	Normal file
0	containerd-shim/example/init/resize	Normal file
0	containerd-shim/example/init/stderr	Normal file
0	containerd-shim/example/init/stdin	Normal file
0	containerd-shim/example/init/stdout	Normal file
0	containerd-shim/example/logger/exit	Normal file
0	containerd-shim/example/logger/pid	Normal file
0	containerd-shim/example/logger/process.json	Normal file
0	containerd-shim/example/logger/resize	Normal file
0	containerd-shim/example/logger/stderr	Normal file
0	containerd-shim/example/logger/stdin	Normal file
0	containerd-shim/example/logger/stdout	Normal file
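CreateContainerRequest in the proto above now carries stdio as fifo paths plus free-form labels, drops the console field, moves checkpoint to field 3, and the response returns the full Container record instead of a bare PID. A minimal sketch of building such a request follows; the Go field names are assumed to follow the standard protoc-gen-go mapping and the bundle/fifo paths are placeholders, not containerd defaults.

package main

import (
	"fmt"

	"github.com/docker/containerd/api/grpc/types"
	"golang.org/x/net/context"
)

// createExample builds the reshaped CreateContainerRequest and reads the
// richer response. Field names and paths are illustrative assumptions.
func createExample(client types.APIClient) error {
	req := &types.CreateContainerRequest{
		Id:         "my-container",
		BundlePath: "/containers/my-container", // OCI bundle prepared by the caller
		Stdin:      "/run/containerd/my-container/stdin",
		Stdout:     "/run/containerd/my-container/stdout",
		Stderr:     "/run/containerd/my-container/stderr",
		Labels:     []string{"env=dev"},
	}
	resp, err := client.CreateContainer(context.Background(), req)
	if err != nil {
		return err
	}
	// The response now carries a full Container record rather than a bare PID.
	fmt.Println("status:", resp.Container.Status)
	return nil
}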
127	containerd-shim/main.go	Normal file
@@ -0,0 +1,127 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/containerd/util"
	"github.com/docker/docker/pkg/term"
)

func setupLogger() {
	f, err := os.OpenFile("/tmp/shim.log", os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
	if err != nil {
		panic(err)
	}
	logrus.SetOutput(f)
}

// containerd-shim is a small shim that sits in front of a runc implementation
// that allows it to be reparented to init and to handle reattach from the caller.
//
// the cwd of the shim should be the bundle for the container. Arg1 should be the path
// to the state directory where the shim can locate fifos and other information.
func main() {
	flag.Parse()
	// start handling signals as soon as possible so that things are properly reaped
	// or if runc exits before we hit the handler
	signals := make(chan os.Signal, 2048)
	signal.Notify(signals)
	// set the shim as the subreaper for all orphaned processes created by the container
	if err := util.SetSubreaper(1); err != nil {
		logrus.WithField("error", err).Fatal("shim: set as subreaper")
	}
	// open the exit pipe
	f, err := os.OpenFile("exit", syscall.O_WRONLY, 0)
	if err != nil {
		logrus.WithField("error", err).Fatal("shim: open exit pipe")
	}
	defer f.Close()
	control, err := os.OpenFile("control", syscall.O_RDWR, 0)
	if err != nil {
		logrus.WithField("error", err).Fatal("shim: open control pipe")
	}
	defer control.Close()
	p, err := newProcess(flag.Arg(0), flag.Arg(1))
	if err != nil {
		logrus.WithField("error", err).Fatal("shim: create new process")
	}
	if err := p.start(); err != nil {
		p.delete()
		logrus.WithField("error", err).Fatal("shim: start process")
	}
	go func() {
		for {
			var msg, w, h int
			if _, err := fmt.Fscanf(control, "%d %d %d\n", &msg, &w, &h); err != nil {
				logrus.WithField("error", err).Error("shim: reading from control")
			}
			logrus.Info("got control message")
			switch msg {
			case 0:
				// close stdin
				p.shimIO.Stdin.Close()
			case 1:
				if p.console == nil {
					continue
				}
				ws := term.Winsize{
					Width:  uint16(w),
					Height: uint16(h),
				}
				term.SetWinsize(p.console.Fd(), &ws)
			}
		}
	}()
	var exitShim bool
	for s := range signals {
		logrus.WithField("signal", s).Debug("shim: received signal")
		switch s {
		case syscall.SIGCHLD:
			exits, err := util.Reap()
			if err != nil {
				logrus.WithField("error", err).Error("shim: reaping child processes")
			}
			for _, e := range exits {
				// check to see if runc is one of the processes that has exited
				if e.Pid == p.pid() {
					exitShim = true
					logrus.WithFields(logrus.Fields{
						"pid":    e.Pid,
						"status": e.Status,
					}).Info("shim: runc exited")
					if err := writeInt("exitStatus", e.Status); err != nil {
						logrus.WithFields(logrus.Fields{
							"error":  err,
							"status": e.Status,
						}).Error("shim: write exit status")
					}
				}
			}
		}
		// runc has exited so the shim can also exit
		if exitShim {
			if err := p.Close(); err != nil {
				logrus.WithField("error", err).Error("shim: close stdio")
			}
			if err := p.delete(); err != nil {
				logrus.WithField("error", err).Error("shim: delete runc state")
			}
			return
		}
	}
}

func writeInt(path string, i int) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%d", i)
	return err
}
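The control fifo read loop above expects one line of three space-separated integers: a message type (0 = close stdin, 1 = resize), followed by width and height. A possible writer on the daemon side could look like the sketch below; the helper names and the way the fifo path is passed in are illustrative, not part of this commit.

package main

import (
	"fmt"
	"os"
	"syscall"
)

// Sketch of the writer side of the shim's control fifo. The shim's Fscanf loop
// always reads three integers per line, so even the close-stdin message (0)
// pads the unused width/height fields with zeros.
func closeStdin(controlPath string) error {
	return writeControl(controlPath, 0, 0, 0)
}

func resizeTTY(controlPath string, width, height int) error {
	return writeControl(controlPath, 1, width, height)
}

func writeControl(controlPath string, msg, w, h int) error {
	f, err := os.OpenFile(controlPath, syscall.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%d %d %d\n", msg, w, h)
	return err
}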
273	containerd-shim/process.go	Normal file
@@ -0,0 +1,273 @@
package main

import (
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"syscall"

	"github.com/docker/containerd/runtime"
	"github.com/opencontainers/runc/libcontainer"
)

type process struct {
	id           string
	bundle       string
	stdio        *stdio
	exec         bool
	containerPid int
	checkpoint   *runtime.Checkpoint
	shimIO       *IO
	console      libcontainer.Console
	consolePath  string
	state        *runtime.ProcessState
}

func newProcess(id, bundle string) (*process, error) {
	p := &process{
		id:     id,
		bundle: bundle,
	}
	s, err := loadProcess()
	if err != nil {
		return nil, err
	}
	p.state = s
	if s.Checkpoint != "" {
		cpt, err := loadCheckpoint(bundle, s.Checkpoint)
		if err != nil {
			return nil, err
		}
		p.checkpoint = cpt
	}
	if err := p.openIO(); err != nil {
		return nil, err
	}
	return p, nil
}

func loadProcess() (*runtime.ProcessState, error) {
	f, err := os.Open("process.json")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var s runtime.ProcessState
	if err := json.NewDecoder(f).Decode(&s); err != nil {
		return nil, err
	}
	return &s, nil
}

func loadCheckpoint(bundle, name string) (*runtime.Checkpoint, error) {
	f, err := os.Open(filepath.Join(bundle, "checkpoints", name, "config.json"))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var cpt runtime.Checkpoint
	if err := json.NewDecoder(f).Decode(&cpt); err != nil {
		return nil, err
	}
	return &cpt, nil
}

func (p *process) start() error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	args := []string{}
	if p.state.Exec {
		args = append(args, "exec",
			"--process", filepath.Join(cwd, "process.json"),
			"--console", p.consolePath,
		)
	} else if p.checkpoint != nil {
		args = append(args, "restore",
			"--image-path", filepath.Join(p.bundle, "checkpoints", p.checkpoint.Name),
		)
		add := func(flags ...string) {
			args = append(args, flags...)
		}
		if p.checkpoint.Shell {
			add("--shell-job")
		}
		if p.checkpoint.Tcp {
			add("--tcp-established")
		}
		if p.checkpoint.UnixSockets {
			add("--ext-unix-sk")
		}
	} else {
		args = append(args, "start",
			"--bundle", p.bundle,
			"--console", p.consolePath,
		)
	}
	args = append(args,
		"-d",
		"--pid-file", filepath.Join(cwd, "pid"),
		p.id,
	)
	cmd := exec.Command("runc", args...)
	cmd.Dir = p.bundle
	cmd.Stdin = p.stdio.stdin
	cmd.Stdout = p.stdio.stdout
	cmd.Stderr = p.stdio.stderr
	// set the parent death signal to SIGKILL so that if the shim dies the container
	// process also dies
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGKILL,
	}
	if err := cmd.Run(); err != nil {
		return err
	}
	data, err := ioutil.ReadFile("pid")
	if err != nil {
		return err
	}
	pid, err := strconv.Atoi(string(data))
	if err != nil {
		return err
	}
	p.containerPid = pid
	return nil
}

func (p *process) pid() int {
	return p.containerPid
}

func (p *process) delete() error {
	if !p.state.Exec {
		return exec.Command("runc", "delete", p.id).Run()
	}
	return nil
}

// openIO opens the pre-created fifos for use with the container
// in RDWR so that they remain open if the other side stops listening
func (p *process) openIO() error {
	p.stdio = &stdio{}
	var (
		uid = p.state.RootUID
		gid = p.state.RootGID
	)
	if p.state.Terminal {
		console, err := libcontainer.NewConsole(uid, gid)
		if err != nil {
			return err
		}
		p.console = console
		p.consolePath = console.Path()
		stdin, err := os.OpenFile(p.state.Stdin, syscall.O_RDWR, 0)
		if err != nil {
			return err
		}
		go io.Copy(console, stdin)
		stdout, err := os.OpenFile(p.state.Stdout, syscall.O_RDWR, 0)
		if err != nil {
			return err
		}
		go func() {
			io.Copy(stdout, console)
			console.Close()
		}()
		return nil
	}
	i, err := p.initializeIO(uid)
	if err != nil {
		return err
	}
	p.shimIO = i
	// non-tty
	for name, dest := range map[string]func(f *os.File){
		p.state.Stdin: func(f *os.File) {
			go io.Copy(i.Stdin, f)
		},
		p.state.Stdout: func(f *os.File) {
			go io.Copy(f, i.Stdout)
		},
		p.state.Stderr: func(f *os.File) {
			go io.Copy(f, i.Stderr)
		},
	} {
		f, err := os.OpenFile(name, syscall.O_RDWR, 0)
		if err != nil {
			return err
		}
		dest(f)
	}
	return nil
}

type IO struct {
	Stdin  io.WriteCloser
	Stdout io.ReadCloser
	Stderr io.ReadCloser
}

func (p *process) initializeIO(rootuid int) (i *IO, err error) {
	var fds []uintptr
	i = &IO{}
	// cleanup in case of an error
	defer func() {
		if err != nil {
			for _, fd := range fds {
				syscall.Close(int(fd))
			}
		}
	}()
	// STDIN
	r, w, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	fds = append(fds, r.Fd(), w.Fd())
	p.stdio.stdin, i.Stdin = r, w
	// STDOUT
	if r, w, err = os.Pipe(); err != nil {
		return nil, err
	}
	fds = append(fds, r.Fd(), w.Fd())
	p.stdio.stdout, i.Stdout = w, r
	// STDERR
	if r, w, err = os.Pipe(); err != nil {
		return nil, err
	}
	fds = append(fds, r.Fd(), w.Fd())
	p.stdio.stderr, i.Stderr = w, r
	// change ownership of the pipes in case we are in a user namespace
	for _, fd := range fds {
		if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil {
			return nil, err
		}
	}
	return i, nil
}

func (p *process) Close() error {
	return p.stdio.Close()
}

type stdio struct {
	stdin  *os.File
	stdout *os.File
	stderr *os.File
}

func (s *stdio) Close() error {
	err := s.stdin.Close()
	if oerr := s.stdout.Close(); err == nil {
		err = oerr
	}
	if oerr := s.stderr.Close(); err == nil {
		err = oerr
	}
	return err
}
@ -4,6 +4,7 @@ import (
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
@ -29,11 +30,6 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var daemonFlags = []cli.Flag{
|
var daemonFlags = []cli.Flag{
|
||||||
cli.StringFlag{
|
|
||||||
Name: "id",
|
|
||||||
Value: getDefaultID(),
|
|
||||||
Usage: "unique containerd id to identify the instance",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
cli.BoolFlag{
|
||||||
Name: "debug",
|
Name: "debug",
|
||||||
Usage: "enable debug output in the logs",
|
Usage: "enable debug output in the logs",
|
||||||
|
@ -43,14 +39,9 @@ var daemonFlags = []cli.Flag{
|
||||||
Value: "/run/containerd",
|
Value: "/run/containerd",
|
||||||
Usage: "runtime state directory",
|
Usage: "runtime state directory",
|
||||||
},
|
},
|
||||||
cli.IntFlag{
|
|
||||||
Name: "c,concurrency",
|
|
||||||
Value: 10,
|
|
||||||
Usage: "set the concurrency level for tasks",
|
|
||||||
},
|
|
||||||
cli.DurationFlag{
|
cli.DurationFlag{
|
||||||
Name: "metrics-interval",
|
Name: "metrics-interval",
|
||||||
Value: 60 * time.Second,
|
Value: 120 * time.Second,
|
||||||
Usage: "interval for flushing metrics to the store",
|
Usage: "interval for flushing metrics to the store",
|
||||||
},
|
},
|
||||||
cli.StringFlag{
|
cli.StringFlag{
|
||||||
|
@ -88,10 +79,9 @@ func main() {
|
||||||
}
|
}
|
||||||
app.Action = func(context *cli.Context) {
|
app.Action = func(context *cli.Context) {
|
||||||
if err := daemon(
|
if err := daemon(
|
||||||
context.String("id"),
|
|
||||||
context.String("listen"),
|
context.String("listen"),
|
||||||
context.String("state-dir"),
|
context.String("state-dir"),
|
||||||
context.Int("concurrency"),
|
10,
|
||||||
context.Bool("oom-notify"),
|
context.Bool("oom-notify"),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
logrus.Fatal(err)
|
logrus.Fatal(err)
|
||||||
|
@ -111,7 +101,7 @@ func checkLimits() error {
|
||||||
logrus.WithFields(logrus.Fields{
|
logrus.WithFields(logrus.Fields{
|
||||||
"current": l.Cur,
|
"current": l.Cur,
|
||||||
"max": l.Max,
|
"max": l.Max,
|
||||||
}).Warn("low RLIMIT_NOFILE changing to max")
|
}).Warn("containerd: low RLIMIT_NOFILE changing to max")
|
||||||
l.Cur = l.Max
|
l.Cur = l.Max
|
||||||
return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||||
}
|
}
|
||||||
|
@ -130,7 +120,6 @@ func debugMetrics(interval time.Duration, graphiteAddr string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logrus.Debugf("Sending metrics to Graphite server on %s", graphiteAddr)
|
|
||||||
go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
|
go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
|
||||||
} else {
|
} else {
|
||||||
l := log.New(os.Stdout, "[containerd] ", log.LstdFlags)
|
l := log.New(os.Stdout, "[containerd] ", log.LstdFlags)
|
||||||
|
@ -154,13 +143,13 @@ func processMetrics() {
|
||||||
// collect the number of open fds
|
// collect the number of open fds
|
||||||
fds, err := util.GetOpenFds(os.Getpid())
|
fds, err := util.GetOpenFds(os.Getpid())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.WithField("error", err).Error("get open fd count")
|
logrus.WithField("error", err).Error("containerd: get open fd count")
|
||||||
}
|
}
|
||||||
fg.Update(int64(fds))
|
fg.Update(int64(fds))
|
||||||
// get the memory used
|
// get the memory used
|
||||||
m := sigar.ProcMem{}
|
m := sigar.ProcMem{}
|
||||||
if err := m.Get(os.Getpid()); err != nil {
|
if err := m.Get(os.Getpid()); err != nil {
|
||||||
logrus.WithField("error", err).Error("get pid memory information")
|
logrus.WithField("error", err).Error("containerd: get pid memory information")
|
||||||
}
|
}
|
||||||
memg.Update(int64(m.Size))
|
memg.Update(int64(m.Size))
|
||||||
}
|
}
|
||||||
|
@ -172,9 +161,11 @@ func processMetrics() {
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func daemon(id, address, stateDir string, concurrency int, oom bool) error {
|
func daemon(address, stateDir string, concurrency int, oom bool) error {
|
||||||
tasks := make(chan *supervisor.StartTask, concurrency*100)
|
// setup a standard reaper so that we don't leave any zombies if we are still alive
|
||||||
sv, err := supervisor.New(id, stateDir, tasks, oom)
|
// this is just good practice because we are spawning new processes
|
||||||
|
go reapProcesses()
|
||||||
|
sv, err := supervisor.New(stateDir, oom)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -184,17 +175,6 @@ func daemon(id, address, stateDir string, concurrency int, oom bool) error {
|
||||||
w := supervisor.NewWorker(sv, wg)
|
w := supervisor.NewWorker(sv, wg)
|
||||||
go w.Start()
|
go w.Start()
|
||||||
}
|
}
|
||||||
// only set containerd as the subreaper if it is not an init process
|
|
||||||
if pid := os.Getpid(); pid != 1 {
|
|
||||||
logrus.WithFields(logrus.Fields{
|
|
||||||
"pid": pid,
|
|
||||||
}).Debug("containerd is not init, set as subreaper")
|
|
||||||
if err := setSubReaper(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// start the signal handler in the background.
|
|
||||||
go startSignalHandler(sv)
|
|
||||||
if err := sv.Start(); err != nil {
|
if err := sv.Start(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -207,10 +187,23 @@ func daemon(id, address, stateDir string, concurrency int, oom bool) error {
|
||||||
}
|
}
|
||||||
s := grpc.NewServer()
|
s := grpc.NewServer()
|
||||||
types.RegisterAPIServer(s, server.NewServer(sv))
|
types.RegisterAPIServer(s, server.NewServer(sv))
|
||||||
logrus.Debugf("GRPC API listen on %s", address)
|
logrus.Debugf("containerd: grpc api on %s", address)
|
||||||
return s.Serve(l)
|
return s.Serve(l)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func reapProcesses() {
|
||||||
|
s := make(chan os.Signal, 2048)
|
||||||
|
signal.Notify(s, syscall.SIGCHLD)
|
||||||
|
if err := util.SetSubreaper(1); err != nil {
|
||||||
|
logrus.WithField("error", err).Error("containerd: set subpreaper")
|
||||||
|
}
|
||||||
|
for range s {
|
||||||
|
if _, err := util.Reap(); err != nil {
|
||||||
|
logrus.WithField("error", err).Error("containerd: reap child processes")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// getDefaultID returns the hostname for the instance host
|
// getDefaultID returns the hostname for the instance host
|
||||||
func getDefaultID() string {
|
func getDefaultID() string {
|
||||||
hostname, err := os.Hostname()
|
hostname, err := os.Hostname()
|
||||||
|
|
|
@ -1,67 +0,0 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/containerd/supervisor"
|
|
||||||
"github.com/docker/containerd/util"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
const signalBufferSize = 2048
|
|
||||||
|
|
||||||
func startSignalHandler(supervisor *supervisor.Supervisor) {
|
|
||||||
logrus.WithFields(logrus.Fields{
|
|
||||||
"bufferSize": signalBufferSize,
|
|
||||||
}).Debug("containerd: starting signal handler")
|
|
||||||
signals := make(chan os.Signal, signalBufferSize)
|
|
||||||
signal.Notify(signals)
|
|
||||||
for s := range signals {
|
|
||||||
switch s {
|
|
||||||
case syscall.SIGTERM, syscall.SIGINT:
|
|
||||||
supervisor.Stop(signals)
|
|
||||||
case syscall.SIGCHLD:
|
|
||||||
exits, err := reap()
|
|
||||||
if err != nil {
|
|
||||||
logrus.WithField("error", err).Error("containerd: reaping child processes")
|
|
||||||
}
|
|
||||||
for _, e := range exits {
|
|
||||||
supervisor.SendEvent(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
supervisor.Close()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func reap() (exits []*supervisor.Event, err error) {
|
|
||||||
var (
|
|
||||||
ws syscall.WaitStatus
|
|
||||||
rus syscall.Rusage
|
|
||||||
)
|
|
||||||
for {
|
|
||||||
pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
|
|
||||||
if err != nil {
|
|
||||||
if err == syscall.ECHILD {
|
|
||||||
return exits, nil
|
|
||||||
}
|
|
||||||
return exits, err
|
|
||||||
}
|
|
||||||
if pid <= 0 {
|
|
||||||
return exits, nil
|
|
||||||
}
|
|
||||||
e := supervisor.NewEvent(supervisor.ExitEventType)
|
|
||||||
e.Pid = pid
|
|
||||||
e.Status = utils.ExitStatus(ws)
|
|
||||||
exits = append(exits, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func setSubReaper() error {
|
|
||||||
return util.SetSubreaper(1)
|
|
||||||
}
|
|
|
@ -16,6 +16,7 @@ var checkpointCommand = cli.Command{
|
||||||
Subcommands: []cli.Command{
|
Subcommands: []cli.Command{
|
||||||
listCheckpointCommand,
|
listCheckpointCommand,
|
||||||
createCheckpointCommand,
|
createCheckpointCommand,
|
||||||
|
deleteCheckpointCommand,
|
||||||
},
|
},
|
||||||
Action: listCheckpoints,
|
Action: listCheckpoints,
|
||||||
}
|
}
|
||||||
|
@ -86,7 +87,11 @@ var createCheckpointCommand = cli.Command{
|
||||||
if _, err := c.CreateCheckpoint(netcontext.Background(), &types.CreateCheckpointRequest{
|
if _, err := c.CreateCheckpoint(netcontext.Background(), &types.CreateCheckpointRequest{
|
||||||
Id: containerID,
|
Id: containerID,
|
||||||
Checkpoint: &types.Checkpoint{
|
Checkpoint: &types.Checkpoint{
|
||||||
Name: name,
|
Name: name,
|
||||||
|
Exit: context.Bool("exit"),
|
||||||
|
Tcp: context.Bool("tcp"),
|
||||||
|
Shell: context.Bool("shell"),
|
||||||
|
UnixSockets: context.Bool("unix-sockets"),
|
||||||
},
|
},
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
|
|
327
ctr/container.go
327
ctr/container.go
|
@ -5,9 +5,12 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
@ -15,14 +18,16 @@ import (
|
||||||
"github.com/codegangsta/cli"
|
"github.com/codegangsta/cli"
|
||||||
"github.com/docker/containerd/api/grpc/types"
|
"github.com/docker/containerd/api/grpc/types"
|
||||||
"github.com/docker/docker/pkg/term"
|
"github.com/docker/docker/pkg/term"
|
||||||
"github.com/opencontainers/runc/libcontainer"
|
|
||||||
"github.com/opencontainers/specs"
|
"github.com/opencontainers/specs"
|
||||||
netcontext "golang.org/x/net/context"
|
netcontext "golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: parse flags and pass opts
|
// TODO: parse flags and pass opts
|
||||||
func getClient(ctx *cli.Context) types.APIClient {
|
func getClient(ctx *cli.Context) types.APIClient {
|
||||||
|
// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
|
||||||
|
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||||
dialOpts := []grpc.DialOption{grpc.WithInsecure()}
|
dialOpts := []grpc.DialOption{grpc.WithInsecure()}
|
||||||
dialOpts = append(dialOpts,
|
dialOpts = append(dialOpts,
|
||||||
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
|
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
|
||||||
|
@ -49,6 +54,25 @@ var containersCommand = cli.Command{
|
||||||
Action: listContainers,
|
Action: listContainers,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var stateCommand = cli.Command{
|
||||||
|
Name: "state",
|
||||||
|
Usage: "get a raw dump of the containerd state",
|
||||||
|
Action: func(context *cli.Context) {
|
||||||
|
c := getClient(context)
|
||||||
|
resp, err := c.State(netcontext.Background(), &types.StateRequest{
|
||||||
|
Id: context.Args().First(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
data, err := json.Marshal(resp)
|
||||||
|
if err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
fmt.Print(string(data))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
var listCommand = cli.Command{
|
var listCommand = cli.Command{
|
||||||
Name: "list",
|
Name: "list",
|
||||||
Usage: "list all running containers",
|
Usage: "list all running containers",
|
||||||
|
@ -57,14 +81,20 @@ var listCommand = cli.Command{
|
||||||
|
|
||||||
func listContainers(context *cli.Context) {
|
func listContainers(context *cli.Context) {
|
||||||
c := getClient(context)
|
c := getClient(context)
|
||||||
resp, err := c.State(netcontext.Background(), &types.StateRequest{})
|
resp, err := c.State(netcontext.Background(), &types.StateRequest{
|
||||||
|
Id: context.Args().First(),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
|
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
|
||||||
fmt.Fprint(w, "ID\tPATH\tSTATUS\tPID1\n")
|
fmt.Fprint(w, "ID\tPATH\tSTATUS\tPROCESSES\n")
|
||||||
for _, c := range resp.Containers {
|
for _, c := range resp.Containers {
|
||||||
fmt.Fprintf(w, "%s\t%s\t%s\t%d\n", c.Id, c.BundlePath, c.Status, c.Processes[0].Pid)
|
procs := []string{}
|
||||||
|
for _, p := range c.Processes {
|
||||||
|
procs = append(procs, p.Pid)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Id, c.BundlePath, c.Status, strings.Join(procs, ","))
|
||||||
}
|
}
|
||||||
if err := w.Flush(); err != nil {
|
if err := w.Flush(); err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
|
@ -84,6 +114,11 @@ var startCommand = cli.Command{
|
||||||
Name: "attach,a",
|
Name: "attach,a",
|
||||||
Usage: "connect to the stdio of the container",
|
Usage: "connect to the stdio of the container",
|
||||||
},
|
},
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "label,l",
|
||||||
|
Value: &cli.StringSlice{},
|
||||||
|
Usage: "set labels for the container",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Action: func(context *cli.Context) {
|
Action: func(context *cli.Context) {
|
||||||
var (
|
var (
|
||||||
|
@ -100,35 +135,47 @@ var startCommand = cli.Command{
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(fmt.Sprintf("cannot get the absolute path of the bundle: %v", err), 1)
|
fatal(fmt.Sprintf("cannot get the absolute path of the bundle: %v", err), 1)
|
||||||
}
|
}
|
||||||
c := getClient(context)
|
s, err := createStdio()
|
||||||
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
r := &types.CreateContainerRequest{
|
var (
|
||||||
Id: id,
|
tty bool
|
||||||
BundlePath: bpath,
|
c = getClient(context)
|
||||||
Checkpoint: context.String("checkpoint"),
|
r = &types.CreateContainerRequest{
|
||||||
}
|
Id: id,
|
||||||
|
BundlePath: bpath,
|
||||||
|
Checkpoint: context.String("checkpoint"),
|
||||||
|
Stdin: s.stdin,
|
||||||
|
Stdout: s.stdout,
|
||||||
|
Stderr: s.stderr,
|
||||||
|
Labels: context.StringSlice("label"),
|
||||||
|
}
|
||||||
|
)
|
||||||
if context.Bool("attach") {
|
if context.Bool("attach") {
|
||||||
mkterm, err := readTermSetting(bpath)
|
mkterm, err := readTermSetting(bpath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
|
tty = mkterm
|
||||||
if mkterm {
|
if mkterm {
|
||||||
if err := attachTty(&r.Console); err != nil {
|
s, err := term.SetRawTerminal(os.Stdin.Fd())
|
||||||
fatal(err.Error(), 1)
|
if err != nil {
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err := attachStdio(&r.Stdin, &r.Stdout, &r.Stderr); err != nil {
|
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
|
state = s
|
||||||
|
}
|
||||||
|
if err := attachStdio(s); err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
resp, err := c.CreateContainer(netcontext.Background(), r)
|
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
|
if _, err := c.CreateContainer(netcontext.Background(), r); err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
if context.Bool("attach") {
|
if context.Bool("attach") {
|
||||||
restoreAndCloseStdin := func() {
|
restoreAndCloseStdin := func() {
|
||||||
if state != nil {
|
if state != nil {
|
||||||
|
@ -138,25 +185,50 @@ var startCommand = cli.Command{
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
io.Copy(stdin, os.Stdin)
|
io.Copy(stdin, os.Stdin)
|
||||||
restoreAndCloseStdin()
|
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
|
||||||
}()
|
Id: id,
|
||||||
for {
|
Pid: "init",
|
||||||
e, err := events.Recv()
|
CloseStdin: true,
|
||||||
if err != nil {
|
}); err != nil {
|
||||||
restoreAndCloseStdin()
|
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
if e.Id == id && e.Type == "exit" {
|
restoreAndCloseStdin()
|
||||||
restoreAndCloseStdin()
|
}()
|
||||||
os.Exit(int(e.Status))
|
if tty {
|
||||||
}
|
resize(id, "init", c)
|
||||||
|
go func() {
|
||||||
|
s := make(chan os.Signal, 64)
|
||||||
|
signal.Notify(s, syscall.SIGWINCH)
|
||||||
|
for range s {
|
||||||
|
if err := resize(id, "init", c); err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
if err := waitForExit(c, events, id, "init", restoreAndCloseStdin); err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
fmt.Println(resp.Pid)
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func resize(id, pid string, c types.APIClient) error {
|
||||||
|
ws, err := term.GetWinsize(os.Stdin.Fd())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
|
||||||
|
Id: id,
|
||||||
|
Pid: "init",
|
||||||
|
Width: uint32(ws.Width),
|
||||||
|
Height: uint32(ws.Height),
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
stdin io.WriteCloser
|
stdin io.WriteCloser
|
||||||
state *term.State
|
state *term.State
|
||||||
|
@ -177,69 +249,23 @@ func readTermSetting(path string) (bool, error) {
|
||||||
return spec.Process.Terminal, nil
|
return spec.Process.Terminal, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func attachTty(consolePath *string) error {
|
func attachStdio(s stdio) error {
|
||||||
console, err := libcontainer.NewConsole(os.Getuid(), os.Getgid())
|
stdinf, err := os.OpenFile(s.stdin, syscall.O_RDWR, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
*consolePath = console.Path()
|
// FIXME: assign to global
|
||||||
stdin = console
|
stdin = stdinf
|
||||||
go func() {
|
stdoutf, err := os.OpenFile(s.stdout, syscall.O_RDWR, 0)
|
||||||
io.Copy(os.Stdout, console)
|
|
||||||
console.Close()
|
|
||||||
}()
|
|
||||||
s, err := term.SetRawTerminal(os.Stdin.Fd())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
state = s
|
go io.Copy(os.Stdout, stdoutf)
|
||||||
return nil
|
stderrf, err := os.OpenFile(s.stderr, syscall.O_RDWR, 0)
|
||||||
}
|
|
||||||
|
|
||||||
func attachStdio(stdins, stdout, stderr *string) error {
|
|
||||||
dir, err := ioutil.TempDir("", "ctr-")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, p := range []struct {
|
go io.Copy(os.Stderr, stderrf)
|
||||||
path string
|
|
||||||
flag int
|
|
||||||
done func(f *os.File)
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
path: filepath.Join(dir, "stdin"),
|
|
||||||
flag: syscall.O_RDWR,
|
|
||||||
done: func(f *os.File) {
|
|
||||||
*stdins = filepath.Join(dir, "stdin")
|
|
||||||
stdin = f
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: filepath.Join(dir, "stdout"),
|
|
||||||
flag: syscall.O_RDWR,
|
|
||||||
done: func(f *os.File) {
|
|
||||||
*stdout = filepath.Join(dir, "stdout")
|
|
||||||
go io.Copy(os.Stdout, f)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
path: filepath.Join(dir, "stderr"),
|
|
||||||
flag: syscall.O_RDWR,
|
|
||||||
done: func(f *os.File) {
|
|
||||||
*stderr = filepath.Join(dir, "stderr")
|
|
||||||
go io.Copy(os.Stderr, f)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
if err := syscall.Mkfifo(p.path, 0755); err != nil {
|
|
||||||
return fmt.Errorf("mkfifo: %s %v", p.path, err)
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(p.path, p.flag, 0)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("open: %s %v", p.path, err)
|
|
||||||
}
|
|
||||||
p.done(f)
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -247,8 +273,9 @@ var killCommand = cli.Command{
|
||||||
Name: "kill",
|
Name: "kill",
|
||||||
Usage: "send a signal to a container or its processes",
|
Usage: "send a signal to a container or its processes",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
cli.IntFlag{
|
cli.StringFlag{
|
||||||
Name: "pid,p",
|
Name: "pid,p",
|
||||||
|
Value: "init",
|
||||||
Usage: "pid of the process to signal within the container",
|
Usage: "pid of the process to signal within the container",
|
||||||
},
|
},
|
||||||
cli.IntFlag{
|
cli.IntFlag{
|
||||||
|
@ -265,7 +292,7 @@ var killCommand = cli.Command{
|
||||||
c := getClient(context)
|
c := getClient(context)
|
||||||
if _, err := c.Signal(netcontext.Background(), &types.SignalRequest{
|
if _, err := c.Signal(netcontext.Background(), &types.SignalRequest{
|
||||||
Id: id,
|
Id: id,
|
||||||
Pid: uint32(context.Int("pid")),
|
Pid: context.String("pid"),
|
||||||
Signal: uint32(context.Int("signal")),
|
Signal: uint32(context.Int("signal")),
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
|
@ -281,6 +308,10 @@ var execCommand = cli.Command{
|
||||||
Name: "id",
|
Name: "id",
|
||||||
Usage: "container id to add the process to",
|
Usage: "container id to add the process to",
|
||||||
},
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "pid",
|
||||||
|
Usage: "process id for the new process",
|
||||||
|
},
|
||||||
cli.BoolFlag{
|
cli.BoolFlag{
|
||||||
Name: "attach,a",
|
Name: "attach,a",
|
||||||
Usage: "connect to the stdio of the container",
|
Usage: "connect to the stdio of the container",
|
||||||
|
@ -309,52 +340,76 @@ var execCommand = cli.Command{
|
||||||
},
|
},
|
||||||
Action: func(context *cli.Context) {
|
Action: func(context *cli.Context) {
|
||||||
p := &types.AddProcessRequest{
|
p := &types.AddProcessRequest{
|
||||||
|
Id: context.String("id"),
|
||||||
|
Pid: context.String("pid"),
|
||||||
Args: context.Args(),
|
Args: context.Args(),
|
||||||
Cwd: context.String("cwd"),
|
Cwd: context.String("cwd"),
|
||||||
Terminal: context.Bool("tty"),
|
Terminal: context.Bool("tty"),
|
||||||
Id: context.String("id"),
|
|
||||||
Env: context.StringSlice("env"),
|
Env: context.StringSlice("env"),
|
||||||
User: &types.User{
|
User: &types.User{
|
||||||
Uid: uint32(context.Int("uid")),
|
Uid: uint32(context.Int("uid")),
|
||||||
Gid: uint32(context.Int("gid")),
|
Gid: uint32(context.Int("gid")),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
s, err := createStdio()
|
||||||
|
if err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
p.Stdin = s.stdin
|
||||||
|
p.Stdout = s.stdout
|
||||||
|
p.Stderr = s.stderr
|
||||||
|
if context.Bool("attach") {
|
||||||
|
if context.Bool("tty") {
|
||||||
|
s, err := term.SetRawTerminal(os.Stdin.Fd())
|
||||||
|
if err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
state = s
|
||||||
|
}
|
||||||
|
if err := attachStdio(s); err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
c := getClient(context)
|
c := getClient(context)
|
||||||
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
|
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
if context.Bool("attach") {
|
if _, err := c.AddProcess(netcontext.Background(), p); err != nil {
|
||||||
if p.Terminal {
|
|
||||||
if err := attachTty(&p.Console); err != nil {
|
|
||||||
fatal(err.Error(), 1)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err := attachStdio(&p.Stdin, &p.Stdout, &p.Stderr); err != nil {
|
|
||||||
fatal(err.Error(), 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r, err := c.AddProcess(netcontext.Background(), p)
|
|
||||||
if err != nil {
|
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
if context.Bool("attach") {
|
if context.Bool("attach") {
|
||||||
go func() {
|
restoreAndCloseStdin := func() {
|
||||||
io.Copy(stdin, os.Stdin)
|
|
||||||
if state != nil {
|
if state != nil {
|
||||||
term.RestoreTerminal(os.Stdin.Fd(), state)
|
term.RestoreTerminal(os.Stdin.Fd(), state)
|
||||||
}
|
}
|
||||||
stdin.Close()
|
stdin.Close()
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
io.Copy(stdin, os.Stdin)
|
||||||
|
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
|
||||||
|
Id: p.Id,
|
||||||
|
Pid: p.Pid,
|
||||||
|
CloseStdin: true,
|
||||||
|
}); err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
restoreAndCloseStdin()
|
||||||
}()
|
}()
|
||||||
for {
|
if context.Bool("tty") {
|
||||||
e, err := events.Recv()
|
resize(p.Id, p.Pid, c)
|
||||||
if err != nil {
|
go func() {
|
||||||
fatal(err.Error(), 1)
|
s := make(chan os.Signal, 64)
|
||||||
}
|
signal.Notify(s, syscall.SIGWINCH)
|
||||||
if e.Pid == r.Pid && e.Type == "exit" {
|
for range s {
|
||||||
os.Exit(int(e.Status))
|
if err := resize(p.Id, p.Pid, c); err != nil {
|
||||||
}
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
if err := waitForExit(c, events, context.String("id"), context.String("pid"), restoreAndCloseStdin); err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -368,16 +423,56 @@ var statsCommand = cli.Command{
|
||||||
Id: context.Args().First(),
|
Id: context.Args().First(),
|
||||||
}
|
}
|
||||||
c := getClient(context)
|
c := getClient(context)
|
||||||
stream, err := c.GetStats(netcontext.Background(), req)
|
stats, err := c.Stats(netcontext.Background(), req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
for {
|
data, err := json.Marshal(stats)
|
||||||
stats, err := stream.Recv()
|
if err != nil {
|
||||||
if err != nil {
|
fatal(err.Error(), 1)
|
||||||
fatal(err.Error(), 1)
|
|
||||||
}
|
|
||||||
fmt.Println(stats)
|
|
||||||
}
|
}
|
||||||
|
fmt.Print(string(data))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, closer func()) error {
|
||||||
|
for {
|
||||||
|
e, err := events.Recv()
|
||||||
|
if err != nil {
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
events, _ = c.Events(netcontext.Background(), &types.EventsRequest{})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if e.Id == id && e.Type == "exit" && e.Pid == pid {
|
||||||
|
closer()
|
||||||
|
os.Exit(int(e.Status))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type stdio struct {
|
||||||
|
stdin string
|
||||||
|
stdout string
|
||||||
|
stderr string
|
||||||
|
}
|
||||||
|
|
||||||
|
func createStdio() (s stdio, err error) {
|
||||||
|
tmp, err := ioutil.TempDir("", "ctr-")
|
||||||
|
if err != nil {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
// create fifo's for the process
|
||||||
|
for name, fd := range map[string]*string{
|
||||||
|
"stdin": &s.stdin,
|
||||||
|
"stdout": &s.stdout,
|
||||||
|
"stderr": &s.stderr,
|
||||||
|
} {
|
||||||
|
path := filepath.Join(tmp, name)
|
||||||
|
if err := syscall.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
*fd = path
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/codegangsta/cli"
|
"github.com/codegangsta/cli"
|
||||||
"github.com/docker/containerd/api/grpc/types"
|
"github.com/docker/containerd/api/grpc/types"
|
||||||
|
@ -13,21 +14,39 @@ import (
|
||||||
var eventsCommand = cli.Command{
|
var eventsCommand = cli.Command{
|
||||||
Name: "events",
|
Name: "events",
|
||||||
Usage: "receive events from the containerd daemon",
|
Usage: "receive events from the containerd daemon",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "timestamp,t",
|
||||||
|
Usage: "get events from a specific time stamp in RFC3339Nano format",
|
||||||
|
},
|
||||||
|
},
|
||||||
Action: func(context *cli.Context) {
|
Action: func(context *cli.Context) {
|
||||||
c := getClient(context)
|
var (
|
||||||
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
|
t int64
|
||||||
|
c = getClient(context)
|
||||||
|
)
|
||||||
|
if ts := context.String("timestamp"); ts != "" {
|
||||||
|
from, err := time.Parse(time.RFC3339Nano, ts)
|
||||||
|
if err != nil {
|
||||||
|
fatal(err.Error(), 1)
|
||||||
|
}
|
||||||
|
t = from.Unix()
|
||||||
|
}
|
||||||
|
events, err := c.Events(netcontext.Background(), &types.EventsRequest{
|
||||||
|
Timestamp: uint64(t),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
|
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
|
||||||
fmt.Fprint(w, "TYPE\tID\tPID\tSTATUS\n")
|
fmt.Fprint(w, "TIME\tTYPE\tID\tPID\tSTATUS\n")
|
||||||
w.Flush()
|
w.Flush()
|
||||||
for {
|
for {
|
||||||
e, err := events.Recv()
|
e, err := events.Recv()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fatal(err.Error(), 1)
|
fatal(err.Error(), 1)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "%s\t%s\t%d\t%d\n", e.Type, e.Id, e.Pid, e.Status)
|
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\n", time.Unix(int64(e.Timestamp), 0).Format(time.RFC3339Nano), e.Type, e.Id, e.Pid, e.Status)
|
||||||
w.Flush()
|
w.Flush()
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
@ -31,6 +31,7 @@ func main() {
|
||||||
checkpointCommand,
|
checkpointCommand,
|
||||||
containersCommand,
|
containersCommand,
|
||||||
eventsCommand,
|
eventsCommand,
|
||||||
|
stateCommand,
|
||||||
}
|
}
|
||||||
app.Before = func(context *cli.Context) error {
|
app.Before = func(context *cli.Context) error {
|
||||||
if context.GlobalBool("debug") {
|
if context.GlobalBool("debug") {
|
||||||
|
|
|
@ -1,9 +1,6 @@
|
||||||
package eventloop
|
package eventloop
|
||||||
|
|
||||||
import (
|
import "sync"
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Event is receiving notification from loop with Handle() call.
|
// Event is receiving notification from loop with Handle() call.
|
||||||
type Event interface {
|
type Event interface {
|
||||||
|
@ -35,8 +32,6 @@ func NewChanLoop(q int) EventLoop {
|
||||||
// All calls after first is no-op.
|
// All calls after first is no-op.
|
||||||
func (el *ChanLoop) Start() error {
|
func (el *ChanLoop) Start() error {
|
||||||
go el.once.Do(func() {
|
go el.once.Do(func() {
|
||||||
// allocate whole OS thread, so nothing can get scheduled over eventloop
|
|
||||||
runtime.LockOSThread()
|
|
||||||
for ev := range el.events {
|
for ev := range el.events {
|
||||||
ev.Handle()
|
ev.Handle()
|
||||||
}
|
}
|
||||||
|
|
955
linux/linux.go
955
linux/linux.go
|
@ -1,955 +0,0 @@
|
||||||
// +build libcontainer
|
|
||||||
|
|
||||||
package linux
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
goruntime "runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/containerd/runtime"
|
|
||||||
"github.com/opencontainers/runc/libcontainer"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/configs"
|
|
||||||
_ "github.com/opencontainers/runc/libcontainer/nsenter"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/seccomp"
|
|
||||||
"github.com/opencontainers/specs"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
RLIMIT_CPU = iota // CPU time in sec
|
|
||||||
RLIMIT_FSIZE // Maximum filesize
|
|
||||||
RLIMIT_DATA // max data size
|
|
||||||
RLIMIT_STACK // max stack size
|
|
||||||
RLIMIT_CORE // max core file size
|
|
||||||
RLIMIT_RSS // max resident set size
|
|
||||||
RLIMIT_NPROC // max number of processes
|
|
||||||
RLIMIT_NOFILE // max number of open files
|
|
||||||
RLIMIT_MEMLOCK // max locked-in-memory address space
|
|
||||||
RLIMIT_AS // address space limit
|
|
||||||
RLIMIT_LOCKS // maximum file locks held
|
|
||||||
RLIMIT_SIGPENDING // max number of pending signals
|
|
||||||
RLIMIT_MSGQUEUE // maximum bytes in POSIX mqueues
|
|
||||||
RLIMIT_NICE // max nice prio allowed to raise to
|
|
||||||
RLIMIT_RTPRIO // maximum realtime priority
|
|
||||||
RLIMIT_RTTIME // timeout for RT tasks in us
|
|
||||||
)
|
|
||||||
|
|
||||||
var rlimitMap = map[string]int{
|
|
||||||
"RLIMIT_CPU": RLIMIT_CPU,
|
|
||||||
"RLIMIT_FSIZE": RLIMIT_FSIZE,
|
|
||||||
"RLIMIT_DATA": RLIMIT_DATA,
|
|
||||||
"RLIMIT_STACK": RLIMIT_STACK,
|
|
||||||
"RLIMIT_CORE": RLIMIT_CORE,
|
|
||||||
"RLIMIT_RSS": RLIMIT_RSS,
|
|
||||||
"RLIMIT_NPROC": RLIMIT_NPROC,
|
|
||||||
"RLIMIT_NOFILE": RLIMIT_NOFILE,
|
|
||||||
"RLIMIT_MEMLOCK": RLIMIT_MEMLOCK,
|
|
||||||
"RLIMIT_AS": RLIMIT_AS,
|
|
||||||
"RLIMIT_LOCKS": RLIMIT_LOCKS,
|
|
||||||
"RLIMIT_SGPENDING": RLIMIT_SIGPENDING,
|
|
||||||
"RLIMIT_MSGQUEUE": RLIMIT_MSGQUEUE,
|
|
||||||
"RLIMIT_NICE": RLIMIT_NICE,
|
|
||||||
"RLIMIT_RTPRIO": RLIMIT_RTPRIO,
|
|
||||||
"RLIMIT_RTTIME": RLIMIT_RTTIME,
|
|
||||||
}
|
|
||||||
|
|
||||||
func strToRlimit(key string) (int, error) {
|
|
||||||
rl, ok := rlimitMap[key]
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("Wrong rlimit value: %s", key)
|
|
||||||
}
|
|
||||||
return rl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const wildcard = -1
|
|
||||||
|
|
||||||
var allowedDevices = []*configs.Device{
|
|
||||||
// allow mknod for any device
|
|
||||||
{
|
|
||||||
Type: 'c',
|
|
||||||
Major: wildcard,
|
|
||||||
Minor: wildcard,
|
|
||||||
Permissions: "m",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Type: 'b',
|
|
||||||
Major: wildcard,
|
|
||||||
Minor: wildcard,
|
|
||||||
Permissions: "m",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Path: "/dev/console",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 5,
|
|
||||||
Minor: 1,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Path: "/dev/tty0",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 4,
|
|
||||||
Minor: 0,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Path: "/dev/tty1",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 4,
|
|
||||||
Minor: 1,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
// /dev/pts/ - pts namespaces are "coming soon"
|
|
||||||
{
|
|
||||||
Path: "",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 136,
|
|
||||||
Minor: wildcard,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Path: "",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 5,
|
|
||||||
Minor: 2,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
// tuntap
|
|
||||||
{
|
|
||||||
Path: "",
|
|
||||||
Type: 'c',
|
|
||||||
Major: 10,
|
|
||||||
Minor: 200,
|
|
||||||
Permissions: "rwm",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var namespaceMapping = map[specs.NamespaceType]configs.NamespaceType{
|
|
||||||
specs.PIDNamespace: configs.NEWPID,
|
|
||||||
specs.NetworkNamespace: configs.NEWNET,
|
|
||||||
specs.MountNamespace: configs.NEWNS,
|
|
||||||
specs.UserNamespace: configs.NEWUSER,
|
|
||||||
specs.IPCNamespace: configs.NEWIPC,
|
|
||||||
specs.UTSNamespace: configs.NEWUTS,
|
|
||||||
}
|
|
||||||
|
|
||||||
var mountPropagationMapping = map[string]int{
|
|
||||||
"rprivate": syscall.MS_PRIVATE | syscall.MS_REC,
|
|
||||||
"private": syscall.MS_PRIVATE,
|
|
||||||
"rslave": syscall.MS_SLAVE | syscall.MS_REC,
|
|
||||||
"slave": syscall.MS_SLAVE,
|
|
||||||
"rshared": syscall.MS_SHARED | syscall.MS_REC,
|
|
||||||
"shared": syscall.MS_SHARED,
|
|
||||||
"": syscall.MS_PRIVATE | syscall.MS_REC,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if len(os.Args) > 1 && os.Args[1] == "init" {
|
|
||||||
goruntime.GOMAXPROCS(1)
|
|
||||||
goruntime.LockOSThread()
|
|
||||||
factory, _ := libcontainer.New("")
|
|
||||||
if err := factory.StartInitialization(); err != nil {
|
|
||||||
fmt.Fprint(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
panic("--this line should have never been executed, congratulations--")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type libcontainerProcess struct {
|
|
||||||
process *libcontainer.Process
|
|
||||||
spec specs.Process
|
|
||||||
}
|
|
||||||
|
|
||||||
// change interface to support an error
|
|
||||||
func (p *libcontainerProcess) Pid() (int, error) {
|
|
||||||
pid, err := p.process.Pid()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
return pid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *libcontainerProcess) Spec() specs.Process {
|
|
||||||
return p.spec
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *libcontainerProcess) Signal(s os.Signal) error {
|
|
||||||
return p.process.Signal(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *libcontainerProcess) Close() error {
|
|
||||||
// in close we always need to call wait to close/flush any pipes
|
|
||||||
p.process.Wait()
|
|
||||||
// explicitly close any open fd on the process
|
|
||||||
for _, cl := range []interface{}{
|
|
||||||
p.process.Stderr,
|
|
||||||
p.process.Stdout,
|
|
||||||
p.process.Stdin,
|
|
||||||
} {
|
|
||||||
if cl != nil {
|
|
||||||
if c, ok := cl.(io.Closer); ok {
|
|
||||||
c.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type libcontainerContainer struct {
|
|
||||||
c libcontainer.Container
|
|
||||||
initProcess *libcontainerProcess
|
|
||||||
additionalProcesses map[int]*libcontainerProcess
|
|
||||||
exitStatus int
|
|
||||||
exited bool
|
|
||||||
path string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Checkpoints() ([]runtime.Checkpoint, error) {
|
|
||||||
out := []runtime.Checkpoint{}
|
|
||||||
files, err := ioutil.ReadDir(c.getCheckpointPath(""))
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, fi := range files {
|
|
||||||
out = append(out, runtime.Checkpoint{
|
|
||||||
Name: fi.Name(),
|
|
||||||
Timestamp: fi.ModTime(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) DeleteCheckpoint(name string) error {
|
|
||||||
path := c.getCheckpointPath(name)
|
|
||||||
if err := os.RemoveAll(path); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return runtime.ErrCheckpointNotExists
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) getCheckpointPath(name string) string {
|
|
||||||
return filepath.Join(c.path, "checkpoints", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Checkpoint(cp runtime.Checkpoint) error {
|
|
||||||
opts := c.createCheckpointOpts(cp)
|
|
||||||
if err := os.MkdirAll(filepath.Dir(opts.ImagesDirectory), 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// mkdir is atomic so if it already exists we can fail
|
|
||||||
if err := os.Mkdir(opts.ImagesDirectory, 0755); err != nil {
|
|
||||||
if os.IsExist(err) {
|
|
||||||
return runtime.ErrCheckpointExists
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := c.c.Checkpoint(opts); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) createCheckpointOpts(cp runtime.Checkpoint) *libcontainer.CriuOpts {
|
|
||||||
opts := libcontainer.CriuOpts{}
|
|
||||||
opts.LeaveRunning = !cp.Exit
|
|
||||||
opts.ShellJob = cp.Shell
|
|
||||||
opts.TcpEstablished = cp.Tcp
|
|
||||||
opts.ExternalUnixConnections = cp.UnixSockets
|
|
||||||
opts.ImagesDirectory = c.getCheckpointPath(cp.Name)
|
|
||||||
return &opts
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Restore(name string) error {
|
|
||||||
path := c.getCheckpointPath(name)
|
|
||||||
var opts libcontainer.CriuOpts
|
|
||||||
opts.ImagesDirectory = path
|
|
||||||
return c.c.Restore(c.initProcess.process, &opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Resume() error {
|
|
||||||
return c.c.Resume()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Pause() error {
|
|
||||||
return c.c.Pause()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) State() runtime.State {
|
|
||||||
// TODO: what to do with error
|
|
||||||
state, err := c.c.Status()
|
|
||||||
if err != nil {
|
|
||||||
return runtime.State("")
|
|
||||||
}
|
|
||||||
switch state {
|
|
||||||
case libcontainer.Paused, libcontainer.Pausing:
|
|
||||||
return runtime.Paused
|
|
||||||
}
|
|
||||||
return runtime.State("")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) ID() string {
|
|
||||||
return c.c.ID()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Path() string {
|
|
||||||
return c.path
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Pid() (int, error) {
|
|
||||||
return c.initProcess.Pid()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Start() error {
|
|
||||||
return c.c.Start(c.initProcess.process)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) SetExited(status int) {
|
|
||||||
c.exitStatus = status
|
|
||||||
// meh
|
|
||||||
c.exited = true
|
|
||||||
c.initProcess.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Stats() (*runtime.Stat, error) {
|
|
||||||
now := time.Now()
|
|
||||||
stats, err := c.c.Stats()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &runtime.Stat{
|
|
||||||
Timestamp: now,
|
|
||||||
Data: stats,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Delete() error {
|
|
||||||
return c.c.Destroy()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) Processes() ([]runtime.Process, error) {
|
|
||||||
procs := []runtime.Process{
|
|
||||||
c.initProcess,
|
|
||||||
}
|
|
||||||
for _, p := range c.additionalProcesses {
|
|
||||||
procs = append(procs, p)
|
|
||||||
}
|
|
||||||
return procs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) RemoveProcess(pid int) error {
|
|
||||||
proc, ok := c.additionalProcesses[pid]
|
|
||||||
if !ok {
|
|
||||||
return runtime.ErrNotChildProcess
|
|
||||||
}
|
|
||||||
err := proc.Close()
|
|
||||||
delete(c.additionalProcesses, pid)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *libcontainerContainer) OOM() (<-chan struct{}, error) {
|
|
||||||
return c.c.NotifyOOM()
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRuntime(stateDir string) (runtime.Runtime, error) {
|
|
||||||
f, err := libcontainer.New(stateDir, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error {
|
|
||||||
//l.CriuPath = context.GlobalString("criu")
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &libcontainerRuntime{
|
|
||||||
factory: f,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type libcontainerRuntime struct {
|
|
||||||
factory libcontainer.Factory
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) Type() string {
|
|
||||||
return "libcontainer"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) Create(id, bundlePath, consolePath string) (runtime.Container, *runtime.IO, error) {
|
|
||||||
spec, rspec, err := r.loadSpec(
|
|
||||||
filepath.Join(bundlePath, "config.json"),
|
|
||||||
filepath.Join(bundlePath, "runtime.json"),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
config, err := r.createLibcontainerConfig(id, bundlePath, spec, rspec)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
container, err := r.factory.Create(id, config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("create container: %v", err)
|
|
||||||
}
|
|
||||||
process, err := r.newProcess(spec.Process)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
var rio runtime.IO
|
|
||||||
if spec.Process.Terminal {
|
|
||||||
if err := process.ConsoleFromPath(consolePath); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
uid, err := config.HostUID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
i, err := process.InitializeIO(uid)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
rio.Stdin = i.Stdin
|
|
||||||
rio.Stderr = i.Stderr
|
|
||||||
rio.Stdout = i.Stdout
|
|
||||||
}
|
|
||||||
c := &libcontainerContainer{
|
|
||||||
c: container,
|
|
||||||
additionalProcesses: make(map[int]*libcontainerProcess),
|
|
||||||
initProcess: &libcontainerProcess{
|
|
||||||
process: process,
|
|
||||||
spec: spec.Process,
|
|
||||||
},
|
|
||||||
path: bundlePath,
|
|
||||||
}
|
|
||||||
return c, &rio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) StartProcess(ci runtime.Container, p specs.Process, consolePath string) (runtime.Process, *runtime.IO, error) {
|
|
||||||
c, ok := ci.(*libcontainerContainer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil, runtime.ErrInvalidContainerType
|
|
||||||
}
|
|
||||||
process, err := r.newProcess(p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
var rio runtime.IO
|
|
||||||
if p.Terminal {
|
|
||||||
if err := process.ConsoleFromPath(consolePath); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
uid, err := c.c.Config().HostUID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
i, err := process.InitializeIO(uid)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
rio.Stdin = i.Stdin
|
|
||||||
rio.Stderr = i.Stderr
|
|
||||||
rio.Stdout = i.Stdout
|
|
||||||
}
|
|
||||||
if err := c.c.Start(process); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
lp := &libcontainerProcess{
|
|
||||||
process: process,
|
|
||||||
spec: p,
|
|
||||||
}
|
|
||||||
pid, err := process.Pid()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
c.additionalProcesses[pid] = lp
|
|
||||||
return lp, &rio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newProcess returns a new libcontainer Process with the arguments from the
|
|
||||||
// spec and stdio from the current process.
|
|
||||||
func (r *libcontainerRuntime) newProcess(p specs.Process) (*libcontainer.Process, error) {
|
|
||||||
return &libcontainer.Process{
|
|
||||||
Args: p.Args,
|
|
||||||
Env: p.Env,
|
|
||||||
// TODO: fix libcontainer's API to better support uid/gid in a typesafe way.
|
|
||||||
User: fmt.Sprintf("%d:%d", p.User.UID, p.User.GID),
|
|
||||||
Cwd: p.Cwd,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadSpec loads the specification from the provided path.
|
|
||||||
// If the path is empty then the default path will be "config.json"
|
|
||||||
func (r *libcontainerRuntime) loadSpec(cPath, rPath string) (spec *specs.LinuxSpec, rspec *specs.LinuxRuntimeSpec, err error) {
|
|
||||||
cf, err := os.Open(cPath)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, nil, fmt.Errorf("JSON specification file at %s not found", cPath)
|
|
||||||
}
|
|
||||||
return spec, rspec, err
|
|
||||||
}
|
|
||||||
defer cf.Close()
|
|
||||||
|
|
||||||
rf, err := os.Open(rPath)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, nil, fmt.Errorf("JSON runtime config file at %s not found", rPath)
|
|
||||||
}
|
|
||||||
return spec, rspec, err
|
|
||||||
}
|
|
||||||
defer rf.Close()
|
|
||||||
|
|
||||||
if err = json.NewDecoder(cf).Decode(&spec); err != nil {
|
|
||||||
return spec, rspec, fmt.Errorf("unmarshal %s: %v", cPath, err)
|
|
||||||
}
|
|
||||||
if err = json.NewDecoder(rf).Decode(&rspec); err != nil {
|
|
||||||
return spec, rspec, fmt.Errorf("unmarshal %s: %v", rPath, err)
|
|
||||||
}
|
|
||||||
return spec, rspec, r.checkSpecVersion(spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkSpecVersion makes sure that the spec version matches runc's while we are in the initial
|
|
||||||
// development period. It is better to hard fail than have missing fields or options in the spec.
|
|
||||||
func (r *libcontainerRuntime) checkSpecVersion(s *specs.LinuxSpec) error {
|
|
||||||
if s.Version != specs.Version {
|
|
||||||
return fmt.Errorf("spec version is not compatible with implemented version %q: spec %q", specs.Version, s.Version)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) createLibcontainerConfig(cgroupName, bundlePath string, spec *specs.LinuxSpec, rspec *specs.LinuxRuntimeSpec) (*configs.Config, error) {
|
|
||||||
rootfsPath := spec.Root.Path
|
|
||||||
if !filepath.IsAbs(rootfsPath) {
|
|
||||||
rootfsPath = filepath.Join(bundlePath, rootfsPath)
|
|
||||||
}
|
|
||||||
config := &configs.Config{
|
|
||||||
Rootfs: rootfsPath,
|
|
||||||
Capabilities: spec.Linux.Capabilities,
|
|
||||||
Readonlyfs: spec.Root.Readonly,
|
|
||||||
Hostname: spec.Hostname,
|
|
||||||
}
|
|
||||||
for _, ns := range rspec.Linux.Namespaces {
|
|
||||||
t, exists := namespaceMapping[ns.Type]
|
|
||||||
if !exists {
|
|
||||||
return nil, fmt.Errorf("namespace %q does not exist", ns)
|
|
||||||
}
|
|
||||||
config.Namespaces.Add(t, ns.Path)
|
|
||||||
}
|
|
||||||
if config.Namespaces.Contains(configs.NEWNET) {
|
|
||||||
config.Networks = []*configs.Network{
|
|
||||||
{
|
|
||||||
Type: "loopback",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, mp := range spec.Mounts {
|
|
||||||
m, ok := rspec.Mounts[mp.Name]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("Mount with Name %q not found in runtime config", mp.Name)
|
|
||||||
}
|
|
||||||
config.Mounts = append(config.Mounts, r.createLibcontainerMount(bundlePath, mp.Path, m))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert rootfs propagation flag
|
|
||||||
if rspec.Linux.RootfsPropagation != "" {
|
|
||||||
_, pflags, _ := parseMountOptions([]string{rspec.Linux.RootfsPropagation})
|
|
||||||
if len(pflags) == 1 {
|
|
||||||
config.RootPropagation = pflags[0]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := r.createDevices(rspec, config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := r.setupUserNamespace(rspec, config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, rlimit := range rspec.Linux.Rlimits {
|
|
||||||
rl, err := r.createLibContainerRlimit(rlimit)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.Rlimits = append(config.Rlimits, rl)
|
|
||||||
}
|
|
||||||
c, err := r.createCgroupConfig(cgroupName, rspec, config.Devices)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.Cgroups = c
|
|
||||||
if config.Readonlyfs {
|
|
||||||
r.setReadonly(config)
|
|
||||||
config.MaskPaths = []string{
|
|
||||||
"/proc/kcore",
|
|
||||||
}
|
|
||||||
config.ReadonlyPaths = []string{
|
|
||||||
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
seccomp, err := r.setupSeccomp(&rspec.Linux.Seccomp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.Seccomp = seccomp
|
|
||||||
config.Sysctl = rspec.Linux.Sysctl
|
|
||||||
config.ProcessLabel = rspec.Linux.SelinuxProcessLabel
|
|
||||||
config.AppArmorProfile = rspec.Linux.ApparmorProfile
|
|
||||||
for _, g := range spec.Process.User.AdditionalGids {
|
|
||||||
config.AdditionalGroups = append(config.AdditionalGroups, strconv.FormatUint(uint64(g), 10))
|
|
||||||
}
|
|
||||||
r.createHooks(rspec, config)
|
|
||||||
config.Version = specs.Version
|
|
||||||
return config, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) createLibcontainerMount(cwd, dest string, m specs.Mount) *configs.Mount {
|
|
||||||
flags, pgflags, data := parseMountOptions(m.Options)
|
|
||||||
source := m.Source
|
|
||||||
if m.Type == "bind" {
|
|
||||||
if !filepath.IsAbs(source) {
|
|
||||||
source = filepath.Join(cwd, m.Source)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &configs.Mount{
|
|
||||||
Device: m.Type,
|
|
||||||
Source: source,
|
|
||||||
Destination: dest,
|
|
||||||
Data: data,
|
|
||||||
Flags: flags,
|
|
||||||
PropagationFlags: pgflags,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rt *libcontainerRuntime) createCgroupConfig(name string, spec *specs.LinuxRuntimeSpec, devices []*configs.Device) (*configs.Cgroup, error) {
|
|
||||||
cr := &configs.Cgroup{
|
|
||||||
Name: name,
|
|
||||||
Parent: "/containerd",
|
|
||||||
}
|
|
||||||
c := &configs.Resources{
|
|
||||||
AllowedDevices: append(devices, allowedDevices...),
|
|
||||||
}
|
|
||||||
cr.Resources = c
|
|
||||||
r := spec.Linux.Resources
|
|
||||||
if r.Memory != nil {
|
|
||||||
if r.Memory.Limit != nil {
|
|
||||||
c.Memory = int64(*r.Memory.Limit)
|
|
||||||
}
|
|
||||||
if r.Memory.Reservation != nil {
|
|
||||||
c.MemoryReservation = int64(*r.Memory.Reservation)
|
|
||||||
}
|
|
||||||
if r.Memory.Swap != nil {
|
|
||||||
c.MemorySwap = int64(*r.Memory.Swap)
|
|
||||||
}
|
|
||||||
if r.Memory.Kernel != nil {
|
|
||||||
c.KernelMemory = int64(*r.Memory.Kernel)
|
|
||||||
}
|
|
||||||
if r.Memory.Swappiness != nil {
|
|
||||||
c.MemorySwappiness = int64(*r.Memory.Swappiness)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if r.CPU != nil {
|
|
||||||
if r.CPU.Shares != nil {
|
|
||||||
c.CpuShares = int64(*r.CPU.Shares)
|
|
||||||
}
|
|
||||||
if r.CPU.Quota != nil {
|
|
||||||
c.CpuQuota = int64(*r.CPU.Quota)
|
|
||||||
}
|
|
||||||
if r.CPU.Period != nil {
|
|
||||||
c.CpuPeriod = int64(*r.CPU.Period)
|
|
||||||
}
|
|
||||||
if r.CPU.RealtimeRuntime != nil {
|
|
||||||
c.CpuRtRuntime = int64(*r.CPU.RealtimeRuntime)
|
|
||||||
}
|
|
||||||
if r.CPU.RealtimePeriod != nil {
|
|
||||||
c.CpuRtPeriod = int64(*r.CPU.RealtimePeriod)
|
|
||||||
}
|
|
||||||
if r.CPU.Cpus != nil {
|
|
||||||
c.CpusetCpus = *r.CPU.Cpus
|
|
||||||
}
|
|
||||||
if r.CPU.Mems != nil {
|
|
||||||
c.CpusetMems = *r.CPU.Mems
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if r.BlockIO != nil {
|
|
||||||
if r.BlockIO.Weight != nil {
|
|
||||||
c.BlkioWeight = *r.BlockIO.Weight
|
|
||||||
}
|
|
||||||
if r.BlockIO.LeafWeight != nil {
|
|
||||||
c.BlkioLeafWeight = *r.BlockIO.LeafWeight
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, wd := range r.BlockIO.WeightDevice {
|
|
||||||
weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, *wd.Weight, *wd.LeafWeight)
|
|
||||||
c.BlkioWeightDevice = append(c.BlkioWeightDevice, weightDevice)
|
|
||||||
}
|
|
||||||
for _, td := range r.BlockIO.ThrottleReadBpsDevice {
|
|
||||||
throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
|
|
||||||
c.BlkioThrottleReadBpsDevice = append(c.BlkioThrottleReadBpsDevice, throttleDevice)
|
|
||||||
}
|
|
||||||
for _, td := range r.BlockIO.ThrottleWriteBpsDevice {
|
|
||||||
throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
|
|
||||||
c.BlkioThrottleWriteBpsDevice = append(c.BlkioThrottleWriteBpsDevice, throttleDevice)
|
|
||||||
}
|
|
||||||
for _, td := range r.BlockIO.ThrottleReadIOPSDevice {
|
|
||||||
throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
|
|
||||||
c.BlkioThrottleReadIOPSDevice = append(c.BlkioThrottleReadIOPSDevice, throttleDevice)
|
|
||||||
}
|
|
||||||
for _, td := range r.BlockIO.ThrottleWriteIOPSDevice {
|
|
||||||
throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
|
|
||||||
c.BlkioThrottleWriteIOPSDevice = append(c.BlkioThrottleWriteIOPSDevice, throttleDevice)
|
|
||||||
}
|
|
||||||
for _, l := range r.HugepageLimits {
|
|
||||||
c.HugetlbLimit = append(c.HugetlbLimit, &configs.HugepageLimit{
|
|
||||||
Pagesize: *l.Pagesize,
|
|
||||||
Limit: *l.Limit,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
c.OomKillDisable = r.DisableOOMKiller != nil && *r.DisableOOMKiller
|
|
||||||
if r.Network != nil {
|
|
||||||
c.NetClsClassid = r.Network.ClassID
|
|
||||||
for _, m := range r.Network.Priorities {
|
|
||||||
c.NetPrioIfpriomap = append(c.NetPrioIfpriomap, &configs.IfPrioMap{
|
|
||||||
Interface: m.Name,
|
|
||||||
Priority: int64(m.Priority),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cr, nil
|
|
||||||
}
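createCgroupConfig groups every container under a fixed "/containerd" cgroup parent. As an illustration only (the real location depends on where the host mounts cgroupfs, which this commit does not control), a memory limit for a container named "abc" would typically surface at:

    /sys/fs/cgroup/memory/containerd/abc/memory.limit_in_bytes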
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) createDevices(spec *specs.LinuxRuntimeSpec, config *configs.Config) error {
|
|
||||||
for _, d := range spec.Linux.Devices {
|
|
||||||
device := &configs.Device{
|
|
||||||
Type: d.Type,
|
|
||||||
Path: d.Path,
|
|
||||||
Major: d.Major,
|
|
||||||
Minor: d.Minor,
|
|
||||||
Permissions: d.Permissions,
|
|
||||||
FileMode: d.FileMode,
|
|
||||||
Uid: d.UID,
|
|
||||||
Gid: d.GID,
|
|
||||||
}
|
|
||||||
config.Devices = append(config.Devices, device)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) setReadonly(config *configs.Config) {
|
|
||||||
for _, m := range config.Mounts {
|
|
||||||
if m.Device == "sysfs" {
|
|
||||||
m.Flags |= syscall.MS_RDONLY
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) setupUserNamespace(spec *specs.LinuxRuntimeSpec, config *configs.Config) error {
|
|
||||||
if len(spec.Linux.UIDMappings) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
config.Namespaces.Add(configs.NEWUSER, "")
|
|
||||||
create := func(m specs.IDMapping) configs.IDMap {
|
|
||||||
return configs.IDMap{
|
|
||||||
HostID: int(m.HostID),
|
|
||||||
ContainerID: int(m.ContainerID),
|
|
||||||
Size: int(m.Size),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, m := range spec.Linux.UIDMappings {
|
|
||||||
config.UidMappings = append(config.UidMappings, create(m))
|
|
||||||
}
|
|
||||||
for _, m := range spec.Linux.GIDMappings {
|
|
||||||
config.GidMappings = append(config.GidMappings, create(m))
|
|
||||||
}
|
|
||||||
rootUID, err := config.HostUID()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rootGID, err := config.HostGID()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, node := range config.Devices {
|
|
||||||
node.Uid = uint32(rootUID)
|
|
||||||
node.Gid = uint32(rootGID)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) createLibContainerRlimit(rlimit specs.Rlimit) (configs.Rlimit, error) {
|
|
||||||
rl, err := strToRlimit(rlimit.Type)
|
|
||||||
if err != nil {
|
|
||||||
return configs.Rlimit{}, err
|
|
||||||
}
|
|
||||||
return configs.Rlimit{
|
|
||||||
Type: rl,
|
|
||||||
Hard: uint64(rlimit.Hard),
|
|
||||||
Soft: uint64(rlimit.Soft),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseMountOptions parses the string and returns the flags, propagation
|
|
||||||
// flags and any mount data that it contains.
|
|
||||||
func parseMountOptions(options []string) (int, []int, string) {
|
|
||||||
var (
|
|
||||||
flag int
|
|
||||||
pgflag []int
|
|
||||||
data []string
|
|
||||||
)
|
|
||||||
flags := map[string]struct {
|
|
||||||
clear bool
|
|
||||||
flag int
|
|
||||||
}{
|
|
||||||
"async": {true, syscall.MS_SYNCHRONOUS},
|
|
||||||
"atime": {true, syscall.MS_NOATIME},
|
|
||||||
"bind": {false, syscall.MS_BIND},
|
|
||||||
"defaults": {false, 0},
|
|
||||||
"dev": {true, syscall.MS_NODEV},
|
|
||||||
"diratime": {true, syscall.MS_NODIRATIME},
|
|
||||||
"dirsync": {false, syscall.MS_DIRSYNC},
|
|
||||||
"exec": {true, syscall.MS_NOEXEC},
|
|
||||||
"mand": {false, syscall.MS_MANDLOCK},
|
|
||||||
"noatime": {false, syscall.MS_NOATIME},
|
|
||||||
"nodev": {false, syscall.MS_NODEV},
|
|
||||||
"nodiratime": {false, syscall.MS_NODIRATIME},
|
|
||||||
"noexec": {false, syscall.MS_NOEXEC},
|
|
||||||
"nomand": {true, syscall.MS_MANDLOCK},
|
|
||||||
"norelatime": {true, syscall.MS_RELATIME},
|
|
||||||
"nostrictatime": {true, syscall.MS_STRICTATIME},
|
|
||||||
"nosuid": {false, syscall.MS_NOSUID},
|
|
||||||
"rbind": {false, syscall.MS_BIND | syscall.MS_REC},
|
|
||||||
"relatime": {false, syscall.MS_RELATIME},
|
|
||||||
"remount": {false, syscall.MS_REMOUNT},
|
|
||||||
"ro": {false, syscall.MS_RDONLY},
|
|
||||||
"rw": {true, syscall.MS_RDONLY},
|
|
||||||
"strictatime": {false, syscall.MS_STRICTATIME},
|
|
||||||
"suid": {true, syscall.MS_NOSUID},
|
|
||||||
"sync": {false, syscall.MS_SYNCHRONOUS},
|
|
||||||
}
|
|
||||||
propagationFlags := map[string]struct {
|
|
||||||
clear bool
|
|
||||||
flag int
|
|
||||||
}{
|
|
||||||
"private": {false, syscall.MS_PRIVATE},
|
|
||||||
"shared": {false, syscall.MS_SHARED},
|
|
||||||
"slave": {false, syscall.MS_SLAVE},
|
|
||||||
"unbindable": {false, syscall.MS_UNBINDABLE},
|
|
||||||
"rprivate": {false, syscall.MS_PRIVATE | syscall.MS_REC},
|
|
||||||
"rshared": {false, syscall.MS_SHARED | syscall.MS_REC},
|
|
||||||
"rslave": {false, syscall.MS_SLAVE | syscall.MS_REC},
|
|
||||||
"runbindable": {false, syscall.MS_UNBINDABLE | syscall.MS_REC},
|
|
||||||
}
|
|
||||||
for _, o := range options {
|
|
||||||
// If the option does not exist in the flags table or the flag
|
|
||||||
// is not supported on the platform,
|
|
||||||
// then it is a data value for a specific fs type
|
|
||||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
|
||||||
if f.clear {
|
|
||||||
flag &= ^f.flag
|
|
||||||
} else {
|
|
||||||
flag |= f.flag
|
|
||||||
}
|
|
||||||
} else if f, exists := propagationFlags[o]; exists && f.flag != 0 {
|
|
||||||
pgflag = append(pgflag, f.flag)
|
|
||||||
} else {
|
|
||||||
data = append(data, o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return flag, pgflag, strings.Join(data, ",")
|
|
||||||
}
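The flag tables above make the option handling easy to exercise in isolation. Below is a hypothetical table check, not part of this commit, assuming it lives in the same package as parseMountOptions; it shows how one option list splits into mount flags, propagation flags, and fs-specific data.

package runtime // package name assumed to match the file above

import (
	"syscall"
	"testing"
)

// Hypothetical test, for illustration only.
func TestParseMountOptions(t *testing.T) {
	flags, pgflags, data := parseMountOptions([]string{"rbind", "ro", "rprivate", "size=64m"})
	if flags&(syscall.MS_BIND|syscall.MS_REC|syscall.MS_RDONLY) != syscall.MS_BIND|syscall.MS_REC|syscall.MS_RDONLY {
		t.Fatalf("expected rbind+ro to set MS_BIND|MS_REC|MS_RDONLY, got %#x", flags)
	}
	if len(pgflags) != 1 || pgflags[0] != syscall.MS_PRIVATE|syscall.MS_REC {
		t.Fatalf("expected rprivate to map to MS_PRIVATE|MS_REC, got %v", pgflags)
	}
	if data != "size=64m" {
		t.Fatalf("expected unrecognized options to pass through as mount data, got %q", data)
	}
}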
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) setupSeccomp(config *specs.Seccomp) (*configs.Seccomp, error) {
|
|
||||||
if config == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// No default action specified, no syscalls listed, assume seccomp disabled
|
|
||||||
if config.DefaultAction == "" && len(config.Syscalls) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
newConfig := new(configs.Seccomp)
|
|
||||||
newConfig.Syscalls = []*configs.Syscall{}
|
|
||||||
|
|
||||||
if len(config.Architectures) > 0 {
|
|
||||||
newConfig.Architectures = []string{}
|
|
||||||
for _, arch := range config.Architectures {
|
|
||||||
newArch, err := seccomp.ConvertStringToArch(string(arch))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newConfig.Architectures = append(newConfig.Architectures, newArch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert default action from string representation
|
|
||||||
newDefaultAction, err := seccomp.ConvertStringToAction(string(config.DefaultAction))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newConfig.DefaultAction = newDefaultAction
|
|
||||||
|
|
||||||
// Loop through all syscall blocks and convert them to libcontainer format
|
|
||||||
for _, call := range config.Syscalls {
|
|
||||||
newAction, err := seccomp.ConvertStringToAction(string(call.Action))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
newCall := configs.Syscall{
|
|
||||||
Name: call.Name,
|
|
||||||
Action: newAction,
|
|
||||||
Args: []*configs.Arg{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through all the arguments of the syscall and convert them
|
|
||||||
for _, arg := range call.Args {
|
|
||||||
newOp, err := seccomp.ConvertStringToOperator(string(arg.Op))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
newArg := configs.Arg{
|
|
||||||
Index: arg.Index,
|
|
||||||
Value: arg.Value,
|
|
||||||
ValueTwo: arg.ValueTwo,
|
|
||||||
Op: newOp,
|
|
||||||
}
|
|
||||||
|
|
||||||
newCall.Args = append(newCall.Args, &newArg)
|
|
||||||
}
|
|
||||||
|
|
||||||
newConfig.Syscalls = append(newConfig.Syscalls, &newCall)
|
|
||||||
}
|
|
||||||
|
|
||||||
return newConfig, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *libcontainerRuntime) createHooks(rspec *specs.LinuxRuntimeSpec, config *configs.Config) {
|
|
||||||
config.Hooks = &configs.Hooks{}
|
|
||||||
for _, h := range rspec.Hooks.Prestart {
|
|
||||||
cmd := configs.Command{
|
|
||||||
Path: h.Path,
|
|
||||||
Args: h.Args,
|
|
||||||
Env: h.Env,
|
|
||||||
}
|
|
||||||
config.Hooks.Prestart = append(config.Hooks.Prestart, configs.NewCommandHook(cmd))
|
|
||||||
}
|
|
||||||
for _, h := range rspec.Hooks.Poststop {
|
|
||||||
cmd := configs.Command{
|
|
||||||
Path: h.Path,
|
|
||||||
Args: h.Args,
|
|
||||||
Env: h.Env,
|
|
||||||
}
|
|
||||||
config.Hooks.Poststop = append(config.Hooks.Poststop, configs.NewCommandHook(cmd))
|
|
||||||
}
|
|
||||||
}
|
|
235
runc/runc.go
@@ -1,235 +0,0 @@
// +build runc
|
|
||||||
|
|
||||||
package runc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/containerd/runtime"
|
|
||||||
"github.com/opencontainers/specs"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewRuntime(stateDir string) (runtime.Runtime, error) {
|
|
||||||
return &runcRuntime{
|
|
||||||
stateDir: stateDir,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type runcContainer struct {
|
|
||||||
id string
|
|
||||||
path string
|
|
||||||
stateDir string
|
|
||||||
exitStatus int
|
|
||||||
processes map[int]*runcProcess
|
|
||||||
initProcess *runcProcess
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) ID() string {
|
|
||||||
return c.id
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Start() error {
|
|
||||||
return c.initProcess.cmd.Start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Stats() (*runtime.Stat, error) {
|
|
||||||
return nil, errors.New("containerd: runc does not support stats in containerd")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Path() string {
|
|
||||||
return c.path
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Pid() (int, error) {
|
|
||||||
return c.initProcess.cmd.Process.Pid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) SetExited(status int) {
|
|
||||||
c.exitStatus = status
|
|
||||||
}
|
|
||||||
|
|
||||||
// noop for runc
|
|
||||||
func (c *runcContainer) Delete() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Processes() ([]runtime.Process, error) {
|
|
||||||
procs := []runtime.Process{
|
|
||||||
c.initProcess,
|
|
||||||
}
|
|
||||||
for _, p := range c.processes {
|
|
||||||
procs = append(procs, p)
|
|
||||||
}
|
|
||||||
return procs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) RemoveProcess(pid int) error {
|
|
||||||
if _, ok := c.processes[pid]; !ok {
|
|
||||||
return runtime.ErrNotChildProcess
|
|
||||||
}
|
|
||||||
delete(c.processes, pid)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) State() runtime.State {
|
|
||||||
// TODO: how to do this with runc
|
|
||||||
return runtime.State{
|
|
||||||
Status: runtime.Running,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Resume() error {
|
|
||||||
return c.newCommand("resume").Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) Pause() error {
|
|
||||||
return c.newCommand("pause").Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: pass arguments
|
|
||||||
func (c *runcContainer) Checkpoint(runtime.Checkpoint) error {
|
|
||||||
return c.newCommand("checkpoint").Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: pass arguments
|
|
||||||
func (c *runcContainer) Restore(cp string) error {
|
|
||||||
return c.newCommand("restore").Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: pass arguments
|
|
||||||
func (c *runcContainer) DeleteCheckpoint(cp string) error {
|
|
||||||
return errors.New("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: implement in runc
|
|
||||||
func (c *runcContainer) Checkpoints() ([]runtime.Checkpoint, error) {
|
|
||||||
return nil, errors.New("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) OOM() (<-chan struct{}, error) {
|
|
||||||
return nil, errors.New("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *runcContainer) newCommand(args ...string) *exec.Cmd {
|
|
||||||
cmd := exec.Command("runc", append([]string{"--root", c.stateDir, "--id", c.id}, args...)...)
|
|
||||||
cmd.Dir = c.path
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
type runcProcess struct {
|
|
||||||
cmd *exec.Cmd
|
|
||||||
spec specs.Process
|
|
||||||
}
|
|
||||||
|
|
||||||
// pid of the container, not of runc
|
|
||||||
func (p *runcProcess) Pid() (int, error) {
|
|
||||||
return p.cmd.Process.Pid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *runcProcess) Spec() specs.Process {
|
|
||||||
return p.spec
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *runcProcess) Signal(s os.Signal) error {
|
|
||||||
return p.cmd.Process.Signal(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *runcProcess) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type runcRuntime struct {
|
|
||||||
stateDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *runcRuntime) Type() string {
|
|
||||||
return "runc"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *runcRuntime) Create(id, bundlePath, consolePath string) (runtime.Container, *runtime.IO, error) {
|
|
||||||
var s specs.Spec
|
|
||||||
f, err := os.Open(filepath.Join(bundlePath, "config.json"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
if err := json.NewDecoder(f).Decode(&s); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
cmd := exec.Command("runc", "--root", r.stateDir, "--id", id, "start")
|
|
||||||
cmd.Dir = bundlePath
|
|
||||||
i, err := r.createIO(cmd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &runcContainer{
|
|
||||||
id: id,
|
|
||||||
path: bundlePath,
|
|
||||||
stateDir: r.stateDir,
|
|
||||||
initProcess: &runcProcess{
|
|
||||||
cmd: cmd,
|
|
||||||
spec: s.Process,
|
|
||||||
},
|
|
||||||
processes: make(map[int]*runcProcess),
|
|
||||||
}, i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *runcRuntime) createIO(cmd *exec.Cmd) (*runtime.IO, error) {
|
|
||||||
w, err := cmd.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ro, err := cmd.StdoutPipe()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
re, err := cmd.StderrPipe()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &runtime.IO{
|
|
||||||
Stdin: w,
|
|
||||||
Stdout: ro,
|
|
||||||
Stderr: re,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *runcRuntime) StartProcess(ci runtime.Container, p specs.Process, consolePath string) (runtime.Process, *runtime.IO, error) {
|
|
||||||
c, ok := ci.(*runcContainer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil, runtime.ErrInvalidContainerType
|
|
||||||
}
|
|
||||||
f, err := ioutil.TempFile("", "containerd")
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
err = json.NewEncoder(f).Encode(p)
|
|
||||||
f.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
cmd := c.newCommand("exec", f.Name())
|
|
||||||
i, err := r.createIO(cmd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
process := &runcProcess{
|
|
||||||
cmd: cmd,
|
|
||||||
spec: p,
|
|
||||||
}
|
|
||||||
if err := cmd.Start(); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
pid, err := process.Pid()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
c.processes[pid] = process
|
|
||||||
return process, i, nil
|
|
||||||
}
|
|
|
@@ -1,112 +1,418 @@
|
||||||
package runtime
|
package runtime
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/opencontainers/runc/libcontainer"
|
||||||
"github.com/opencontainers/specs"
|
"github.com/opencontainers/specs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Process interface {
|
|
||||||
io.Closer
|
|
||||||
Pid() (int, error)
|
|
||||||
Spec() specs.Process
|
|
||||||
Signal(os.Signal) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type State string
|
|
||||||
|
|
||||||
const (
|
|
||||||
Paused = State("paused")
|
|
||||||
Running = State("running")
|
|
||||||
)
|
|
||||||
|
|
||||||
type Console interface {
|
|
||||||
io.ReadWriter
|
|
||||||
io.Closer
|
|
||||||
}
|
|
||||||
|
|
||||||
type IO struct {
|
|
||||||
Stdin io.WriteCloser
|
|
||||||
Stdout io.ReadCloser
|
|
||||||
Stderr io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *IO) Close() error {
|
|
||||||
var oerr error
|
|
||||||
for _, c := range []io.Closer{
|
|
||||||
i.Stdin,
|
|
||||||
i.Stdout,
|
|
||||||
i.Stderr,
|
|
||||||
} {
|
|
||||||
if c != nil {
|
|
||||||
if err := c.Close(); oerr == nil {
|
|
||||||
oerr = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return oerr
|
|
||||||
}
|
|
||||||
|
|
||||||
type Stat struct {
|
|
||||||
// Timestamp is the time that the statistics were collected
|
|
||||||
Timestamp time.Time
|
|
||||||
// Data is the raw stats
|
|
||||||
// TODO: it is currently an interface because we don't know what type of exec drivers
|
|
||||||
// we will have or what the structure should look like at the moment, so the containers
|
|
||||||
// can return what they want and we could marshal to json or whatever.
|
|
||||||
Data interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Checkpoint struct {
|
|
||||||
// Timestamp is the time that checkpoint happened
|
|
||||||
Timestamp time.Time
|
|
||||||
// Name is the name of the checkpoint
|
|
||||||
Name string
|
|
||||||
// Tcp checkpoints open tcp connections
|
|
||||||
Tcp bool
|
|
||||||
// UnixSockets persists unix sockets in the checkpoint
|
|
||||||
UnixSockets bool
|
|
||||||
// Shell persists tty sessions in the checkpoint
|
|
||||||
Shell bool
|
|
||||||
// Exit exits the container after the checkpoint is finished
|
|
||||||
Exit bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type Container interface {
|
type Container interface {
|
||||||
// ID returns the container ID
|
// ID returns the container ID
|
||||||
ID() string
|
ID() string
|
||||||
// Start starts the init process of the container
|
|
||||||
Start() error
|
|
||||||
// Path returns the path to the bundle
|
// Path returns the path to the bundle
|
||||||
Path() string
|
Path() string
|
||||||
// Pid returns the container's init process id
|
// Start starts the init process of the container
|
||||||
Pid() (int, error)
|
Start(checkpoint string, s Stdio) (Process, error)
|
||||||
// SetExited sets the exit status of the container after its init dies
|
// Exec starts another process in an existing container
|
||||||
SetExited(status int)
|
Exec(string, specs.Process, Stdio) (Process, error)
|
||||||
// Delete deletes the container
|
// Delete removes the container's state and any resources
|
||||||
Delete() error
|
Delete() error
|
||||||
// Processes returns all the containers processes that have been added
|
// Processes returns all the containers processes that have been added
|
||||||
Processes() ([]Process, error)
|
Processes() ([]Process, error)
|
||||||
// RemoveProcess removes a specific process for the container because it exited
|
|
||||||
RemoveProcess(pid int) error
|
|
||||||
// State returns the containers runtime state
|
// State returns the containers runtime state
|
||||||
State() State
|
State() State
|
||||||
// Resume resumes a paused container
|
// Resume resumes a paused container
|
||||||
Resume() error
|
Resume() error
|
||||||
// Pause pauses a running container
|
// Pause pauses a running container
|
||||||
Pause() error
|
Pause() error
|
||||||
|
// RemoveProcess removes the specified process from the container
|
||||||
|
RemoveProcess(string) error
|
||||||
// Checkpoints returns all the checkpoints for a container
|
// Checkpoints returns all the checkpoints for a container
|
||||||
Checkpoints() ([]Checkpoint, error)
|
Checkpoints() ([]Checkpoint, error)
|
||||||
// Checkpoint creates a new checkpoint
|
// Checkpoint creates a new checkpoint
|
||||||
Checkpoint(Checkpoint) error
|
Checkpoint(Checkpoint) error
|
||||||
// DeleteCheckpoint deletes the checkpoint for the provided name
|
// DeleteCheckpoint deletes the checkpoint for the provided name
|
||||||
DeleteCheckpoint(name string) error
|
DeleteCheckpoint(name string) error
|
||||||
// Restore restores the container to that of the checkpoint provided by name
|
// Labels are user provided labels for the container
|
||||||
Restore(name string) error
|
Labels() []string
|
||||||
|
// Pids returns all pids inside the container
|
||||||
|
Pids() ([]int, error)
|
||||||
// Stats returns realtime container stats and resource information
|
// Stats returns realtime container stats and resource information
|
||||||
Stats() (*Stat, error)
|
Stats() (*Stat, error)
|
||||||
// OOM signals the channel if the container received an OOM notification
|
// OOM signals the channel if the container received an OOM notification
|
||||||
OOM() (<-chan struct{}, error)
|
// OOM() (<-chan struct{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Stdio struct {
|
||||||
|
Stdin string
|
||||||
|
Stdout string
|
||||||
|
Stderr string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStdio(stdin, stdout, stderr string) Stdio {
|
||||||
|
for _, s := range []*string{
|
||||||
|
&stdin, &stdout, &stderr,
|
||||||
|
} {
|
||||||
|
if *s == "" {
|
||||||
|
*s = "/dev/null"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Stdio{
|
||||||
|
Stdin: stdin,
|
||||||
|
Stdout: stdout,
|
||||||
|
Stderr: stderr,
|
||||||
|
}
|
||||||
|
}
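A quick illustration of the defaulting behaviour (hypothetical paths; assumes the runtime package above is imported as runtime):

	s := runtime.NewStdio("", "/run/containerd/abc/stdout", "")
	// s.Stdin  == "/dev/null"
	// s.Stdout == "/run/containerd/abc/stdout"
	// s.Stderr == "/dev/null"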
|
||||||
|
|
||||||
|
// New returns a new container
|
||||||
|
func New(root, id, bundle string, labels []string) (Container, error) {
|
||||||
|
c := &container{
|
||||||
|
root: root,
|
||||||
|
id: id,
|
||||||
|
bundle: bundle,
|
||||||
|
labels: labels,
|
||||||
|
processes: make(map[string]*process),
|
||||||
|
}
|
||||||
|
if err := os.Mkdir(filepath.Join(root, id), 0755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f, err := os.Create(filepath.Join(root, id, StateFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if err := json.NewEncoder(f).Encode(state{
|
||||||
|
Bundle: bundle,
|
||||||
|
Labels: labels,
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Load(root, id string) (Container, error) {
|
||||||
|
var s state
|
||||||
|
f, err := os.Open(filepath.Join(root, id, StateFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if err := json.NewDecoder(f).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c := &container{
|
||||||
|
root: root,
|
||||||
|
id: id,
|
||||||
|
bundle: s.Bundle,
|
||||||
|
labels: s.Labels,
|
||||||
|
processes: make(map[string]*process),
|
||||||
|
}
|
||||||
|
dirs, err := ioutil.ReadDir(filepath.Join(root, id))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, d := range dirs {
|
||||||
|
if !d.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pid := d.Name()
|
||||||
|
s, err := readProcessState(filepath.Join(root, id, pid))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s)
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithField("id", id).WithField("pid", pid).Debug("containerd: error loading process %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.processes[pid] = p
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readProcessState(dir string) (*ProcessState, error) {
|
||||||
|
f, err := os.Open(filepath.Join(dir, "process.json"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
var s ProcessState
|
||||||
|
if err := json.NewDecoder(f).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type container struct {
|
||||||
|
// path to store runtime state information
|
||||||
|
root string
|
||||||
|
id string
|
||||||
|
bundle string
|
||||||
|
processes map[string]*process
|
||||||
|
stdio Stdio
|
||||||
|
labels []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) ID() string {
|
||||||
|
return c.id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Path() string {
|
||||||
|
return c.bundle
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Labels() []string {
|
||||||
|
return c.labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Start(checkpoint string, s Stdio) (Process, error) {
|
||||||
|
processRoot := filepath.Join(c.root, c.id, InitProcessID)
|
||||||
|
if err := os.Mkdir(processRoot, 0755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cmd := exec.Command("containerd-shim",
|
||||||
|
c.id, c.bundle,
|
||||||
|
)
|
||||||
|
cmd.Dir = processRoot
|
||||||
|
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||||
|
Setpgid: true,
|
||||||
|
}
|
||||||
|
spec, err := c.readSpec()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
config := &processConfig{
|
||||||
|
checkpoint: checkpoint,
|
||||||
|
root: processRoot,
|
||||||
|
id: InitProcessID,
|
||||||
|
c: c,
|
||||||
|
stdio: s,
|
||||||
|
spec: spec,
|
||||||
|
processSpec: spec.Process,
|
||||||
|
}
|
||||||
|
p, err := newProcess(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := p.getPid(); err != nil {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
c.processes[InitProcessID] = p
|
||||||
|
return p, nil
|
||||||
|
}
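Start (and Exec below) keep their state on disk rather than in the daemon's memory; the supervisor and the shim communicate through these files. A sketch of the resulting layout, with file names taken from the constants in runtime/runtime.go below (the pid file is presumably written by containerd-shim, whose code is elsewhere in this commit):

	<stateDir>/<container-id>/state.json        bundle path and labels, written by New
	<stateDir>/<container-id>/init/process.json serialized ProcessState for the init process
	<stateDir>/<container-id>/init/pid          pid of the container process, polled by getPid
	<stateDir>/<container-id>/init/exit         FIFO signalling process exit
	<stateDir>/<container-id>/init/exitStatus   exit code, read by ExitStatus
	<stateDir>/<container-id>/init/control      FIFO carrying CloseStdin/Resize messages
	<stateDir>/<container-id>/<exec-id>/        the same per-process files for Exec'd processes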
|
||||||
|
|
||||||
|
func (c *container) Exec(pid string, spec specs.Process, s Stdio) (Process, error) {
|
||||||
|
processRoot := filepath.Join(c.root, c.id, pid)
|
||||||
|
if err := os.Mkdir(processRoot, 0755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cmd := exec.Command("containerd-shim",
|
||||||
|
c.id, c.bundle,
|
||||||
|
)
|
||||||
|
cmd.Dir = processRoot
|
||||||
|
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||||
|
Setpgid: true,
|
||||||
|
}
|
||||||
|
config := &processConfig{
|
||||||
|
exec: true,
|
||||||
|
id: pid,
|
||||||
|
root: processRoot,
|
||||||
|
c: c,
|
||||||
|
processSpec: spec,
|
||||||
|
stdio: s,
|
||||||
|
}
|
||||||
|
p, err := newProcess(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := p.getPid(); err != nil {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
c.processes[pid] = p
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) readSpec() (*specs.LinuxSpec, error) {
|
||||||
|
var spec specs.LinuxSpec
|
||||||
|
f, err := os.Open(filepath.Join(c.bundle, "config.json"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if err := json.NewDecoder(f).Decode(&spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Pause() error {
|
||||||
|
return exec.Command("runc", "pause", c.id).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Resume() error {
|
||||||
|
return exec.Command("runc", "resume", c.id).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) State() State {
|
||||||
|
return Running
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Delete() error {
|
||||||
|
return os.RemoveAll(filepath.Join(c.root, c.id))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Processes() ([]Process, error) {
|
||||||
|
out := []Process{}
|
||||||
|
for _, p := range c.processes {
|
||||||
|
out = append(out, p)
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) RemoveProcess(pid string) error {
|
||||||
|
delete(c.processes, pid)
|
||||||
|
return os.RemoveAll(filepath.Join(c.root, c.id, pid))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Checkpoints() ([]Checkpoint, error) {
|
||||||
|
dirs, err := ioutil.ReadDir(filepath.Join(c.bundle, "checkpoints"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var out []Checkpoint
|
||||||
|
for _, d := range dirs {
|
||||||
|
if !d.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
path := filepath.Join(c.bundle, "checkpoints", d.Name(), "config.json")
|
||||||
|
data, err := ioutil.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var cpt Checkpoint
|
||||||
|
if err := json.Unmarshal(data, &cpt); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out = append(out, cpt)
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Checkpoint(cpt Checkpoint) error {
|
||||||
|
if err := os.MkdirAll(filepath.Join(c.bundle, "checkpoints"), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
path := filepath.Join(c.bundle, "checkpoints", cpt.Name)
|
||||||
|
if err := os.Mkdir(path, 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f, err := os.Create(filepath.Join(path, "config.json"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cpt.Created = time.Now()
|
||||||
|
err = json.NewEncoder(f).Encode(cpt)
|
||||||
|
f.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args := []string{
|
||||||
|
"checkpoint",
|
||||||
|
"--image-path", path,
|
||||||
|
}
|
||||||
|
add := func(flags ...string) {
|
||||||
|
args = append(args, flags...)
|
||||||
|
}
|
||||||
|
if !cpt.Exit {
|
||||||
|
add("--leave-running")
|
||||||
|
}
|
||||||
|
if cpt.Shell {
|
||||||
|
add("--shell-job")
|
||||||
|
}
|
||||||
|
if cpt.Tcp {
|
||||||
|
add("--tcp-established")
|
||||||
|
}
|
||||||
|
if cpt.UnixSockets {
|
||||||
|
add("--ext-unix-sk")
|
||||||
|
}
|
||||||
|
add(c.id)
|
||||||
|
return exec.Command("runc", args...).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) DeleteCheckpoint(name string) error {
|
||||||
|
return os.RemoveAll(filepath.Join(c.bundle, "checkpoints", name))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Pids() ([]int, error) {
|
||||||
|
container, err := c.getLibctContainer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return container.Processes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) Stats() (*Stat, error) {
|
||||||
|
container, err := c.getLibctContainer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
stats, err := container.Stats()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Stat{
|
||||||
|
Timestamp: now,
|
||||||
|
Data: stats,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *container) getLibctContainer() (libcontainer.Container, error) {
|
||||||
|
f, err := libcontainer.New(specs.LinuxStateDirectory, libcontainer.Cgroupfs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.Load(c.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRootIDs(s *specs.LinuxSpec) (int, int, error) {
|
||||||
|
if s == nil {
|
||||||
|
return 0, 0, nil
|
||||||
|
}
|
||||||
|
var hasUserns bool
|
||||||
|
for _, ns := range s.Linux.Namespaces {
|
||||||
|
if ns.Type == specs.UserNamespace {
|
||||||
|
hasUserns = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !hasUserns {
|
||||||
|
return 0, 0, nil
|
||||||
|
}
|
||||||
|
uid := hostIDFromMap(0, s.Linux.UIDMappings)
|
||||||
|
gid := hostIDFromMap(0, s.Linux.GIDMappings)
|
||||||
|
return uid, gid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func hostIDFromMap(id uint32, mp []specs.IDMapping) int {
|
||||||
|
for _, m := range mp {
|
||||||
|
if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) {
|
||||||
|
return int(m.HostID + (id - m.ContainerID))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
}
|
}
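hostIDFromMap resolves a container uid/gid to a host ID through the first matching range, host = HostID + (id - ContainerID), and falls back to 0 when no range matches. A hypothetical same-package test, not part of this commit, illustrating the arithmetic:

package runtime // package name assumed to match the file above

import (
	"testing"

	"github.com/opencontainers/specs"
)

// Hypothetical mapping: container ids 0-65535 map to host ids 100000-165535.
func TestHostIDFromMap(t *testing.T) {
	mp := []specs.IDMapping{{HostID: 100000, ContainerID: 0, Size: 65536}}
	if got := hostIDFromMap(0, mp); got != 100000 {
		t.Fatalf("expected 100000, got %d", got)
	}
	if got := hostIDFromMap(1000, mp); got != 101000 {
		t.Fatalf("expected 101000, got %d", got)
	}
	if got := hostIDFromMap(70000, mp); got != 0 {
		t.Fatalf("expected fallback 0 for an unmapped id, got %d", got)
	}
}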
|
||||||
|
|
229
runtime/process.go
Normal file
@@ -0,0 +1,229 @@
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/opencontainers/specs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Process interface {
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
// ID of the process.
|
||||||
|
// This is either "init" when it is the container's init process or
|
||||||
|
// it is a user provided id for the process similar to the container id
|
||||||
|
ID() string
|
||||||
|
CloseStdin() error
|
||||||
|
Resize(int, int) error
|
||||||
|
// ExitFD returns the fd that provides an event when the process exits
|
||||||
|
ExitFD() int
|
||||||
|
// ExitStatus returns the exit status of the process or an error if it
|
||||||
|
// has not exited
|
||||||
|
ExitStatus() (int, error)
|
||||||
|
// Spec returns the process spec that created the process
|
||||||
|
Spec() specs.Process
|
||||||
|
// Signal sends the provided signal to the process
|
||||||
|
Signal(os.Signal) error
|
||||||
|
// Container returns the container that the process belongs to
|
||||||
|
Container() Container
|
||||||
|
// Stdio of the container
|
||||||
|
Stdio() Stdio
|
||||||
|
// SystemPid is the pid on the system
|
||||||
|
SystemPid() int
|
||||||
|
}
|
||||||
|
|
||||||
|
type processConfig struct {
|
||||||
|
id string
|
||||||
|
root string
|
||||||
|
processSpec specs.Process
|
||||||
|
spec *specs.LinuxSpec
|
||||||
|
c *container
|
||||||
|
stdio Stdio
|
||||||
|
exec bool
|
||||||
|
checkpoint string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newProcess(config *processConfig) (*process, error) {
|
||||||
|
p := &process{
|
||||||
|
root: config.root,
|
||||||
|
id: config.id,
|
||||||
|
container: config.c,
|
||||||
|
spec: config.processSpec,
|
||||||
|
stdio: config.stdio,
|
||||||
|
}
|
||||||
|
uid, gid, err := getRootIDs(config.spec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f, err := os.Create(filepath.Join(config.root, "process.json"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if err := json.NewEncoder(f).Encode(ProcessState{
|
||||||
|
Process: config.processSpec,
|
||||||
|
Exec: config.exec,
|
||||||
|
Checkpoint: config.checkpoint,
|
||||||
|
RootUID: uid,
|
||||||
|
RootGID: gid,
|
||||||
|
Stdin: config.stdio.Stdin,
|
||||||
|
Stdout: config.stdio.Stdout,
|
||||||
|
Stderr: config.stdio.Stderr,
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
control, err := getControlPipe(filepath.Join(config.root, ControlFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
p.exitPipe = exit
|
||||||
|
p.controlPipe = control
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
|
||||||
|
p := &process{
|
||||||
|
root: root,
|
||||||
|
id: id,
|
||||||
|
container: c,
|
||||||
|
spec: s.Process,
|
||||||
|
stdio: Stdio{
|
||||||
|
Stdin: s.Stdin,
|
||||||
|
Stdout: s.Stdout,
|
||||||
|
Stderr: s.Stderr,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if _, err := p.getPid(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := p.ExitStatus(); err != nil {
|
||||||
|
if err == ErrProcessNotExited {
|
||||||
|
exit, err := getExitPipe(filepath.Join(root, ExitFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
p.exitPipe = exit
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getExitPipe(path string) (*os.File, error) {
|
||||||
|
if err := syscall.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// add NONBLOCK in case the other side has already closed or else
|
||||||
|
// this function would never return
|
||||||
|
return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
|
||||||
|
}
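The exit FIFO is opened read-only and non-blocking here; the write end is expected to stay open for the lifetime of the process, so the daemon can treat a hangup on ExitFD() as the exit notification. A hypothetical wait loop using golang.org/x/sys/unix (names readControl/waitExit and the package are illustrative; the supervisor's real event loop is elsewhere in this commit and may differ):

package exitwatch // illustrative package, not part of the commit

import "golang.org/x/sys/unix"

// waitExit blocks until the write end of the exit FIFO is closed.
func waitExit(exitFD int) error {
	fds := []unix.PollFd{{Fd: int32(exitFD), Events: unix.POLLIN | unix.POLLHUP}}
	for {
		if _, err := unix.Poll(fds, -1); err != nil {
			if err == unix.EINTR {
				continue // interrupted by a signal, retry
			}
			return err
		}
		if fds[0].Revents&(unix.POLLHUP|unix.POLLERR) != 0 {
			return nil // writer gone: the process has exited
		}
	}
}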
|
||||||
|
|
||||||
|
func getControlPipe(path string) (*os.File, error) {
|
||||||
|
if err := syscall.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type process struct {
|
||||||
|
root string
|
||||||
|
id string
|
||||||
|
pid int
|
||||||
|
exitPipe *os.File
|
||||||
|
controlPipe *os.File
|
||||||
|
container *container
|
||||||
|
spec specs.Process
|
||||||
|
stdio Stdio
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) ID() string {
|
||||||
|
return p.id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) Container() Container {
|
||||||
|
return p.container
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) SystemPid() int {
|
||||||
|
return p.pid
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitFD returns the fd of the exit pipe
|
||||||
|
func (p *process) ExitFD() int {
|
||||||
|
return int(p.exitPipe.Fd())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) CloseStdin() error {
|
||||||
|
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) Resize(w, h int) error {
|
||||||
|
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
|
||||||
|
return err
|
||||||
|
}
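CloseStdin and Resize define a tiny line protocol on the control FIFO: "0 0 0" means close stdin, "1 <w> <h>" means resize the terminal to w by h. A hypothetical reader for the shim side of that pipe (illustration only; the actual containerd-shim added by this commit may implement it differently):

package shim // illustrative package, not part of the commit

import (
	"fmt"
	"io"
)

// readControl decodes messages written by the daemon's CloseStdin/Resize.
func readControl(control io.Reader, closeStdin func() error, resize func(w, h int) error) error {
	for {
		var msg, w, h int
		if _, err := fmt.Fscanf(control, "%d %d %d\n", &msg, &w, &h); err != nil {
			return err // io.EOF once the daemon closes its end
		}
		var err error
		switch msg {
		case 0:
			err = closeStdin()
		case 1:
			err = resize(w, h)
		}
		if err != nil {
			return err
		}
	}
}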
|
||||||
|
|
||||||
|
func (p *process) ExitStatus() (int, error) {
|
||||||
|
data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return -1, ErrProcessNotExited
|
||||||
|
}
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
if len(data) == 0 {
|
||||||
|
return -1, ErrProcessNotExited
|
||||||
|
}
|
||||||
|
return strconv.Atoi(string(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signal sends the provided signal to the process
|
||||||
|
func (p *process) Signal(s os.Signal) error {
|
||||||
|
return syscall.Kill(p.pid, s.(syscall.Signal))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) Spec() specs.Process {
|
||||||
|
return p.spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) Stdio() Stdio {
|
||||||
|
return p.stdio
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes any open files and/or resources on the process
|
||||||
|
func (p *process) Close() error {
|
||||||
|
return p.exitPipe.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *process) getPid() (int, error) {
|
||||||
|
for i := 0; i < 20; i++ {
|
||||||
|
data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
i, err := strconv.Atoi(string(data))
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
p.pid = i
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("containerd: cannot read pid file")
|
||||||
|
}
|
|
@@ -2,6 +2,7 @@ package runtime
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/opencontainers/specs"
|
"github.com/opencontainers/specs"
|
||||||
)
|
)
|
||||||
|
@@ -13,14 +14,67 @@ var (
|
||||||
ErrCheckpointExists = errors.New("containerd: checkpoint already exists")
|
ErrCheckpointExists = errors.New("containerd: checkpoint already exists")
|
||||||
ErrContainerExited = errors.New("containerd: container has exited")
|
ErrContainerExited = errors.New("containerd: container has exited")
|
||||||
ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime")
|
ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime")
|
||||||
|
ErrProcessNotExited = errors.New("containerd: process has not exited")
|
||||||
|
ErrProcessExited = errors.New("containerd: process has exited")
|
||||||
|
|
||||||
|
errNotImplemented = errors.New("containerd: not implemented")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Runtime handles containers, containers handle their own actions
|
const (
|
||||||
type Runtime interface {
|
ExitFile = "exit"
|
||||||
// Type of the runtime
|
ExitStatusFile = "exitStatus"
|
||||||
Type() string
|
StateFile = "state.json"
|
||||||
// Create creates a new container initialized but without it starting it
|
ControlFile = "control"
|
||||||
Create(id, bundlePath, consolePath string) (Container, *IO, error)
|
InitProcessID = "init"
|
||||||
// StartProcess adds a new process to the container
|
)
|
||||||
StartProcess(c Container, p specs.Process, consolePath string) (Process, *IO, error)
|
|
||||||
|
type State string
|
||||||
|
|
||||||
|
const (
|
||||||
|
Paused = State("paused")
|
||||||
|
Running = State("running")
|
||||||
|
)
|
||||||
|
|
||||||
|
type state struct {
|
||||||
|
Bundle string `json:"bundle"`
|
||||||
|
Labels []string `json:"labels"`
|
||||||
|
Stdin string `json:"stdin"`
|
||||||
|
Stdout string `json:"stdout"`
|
||||||
|
Stderr string `json:"stderr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ProcessState struct {
|
||||||
|
specs.Process
|
||||||
|
Exec bool `json:"exec"`
|
||||||
|
Checkpoint string `json:"checkpoint"`
|
||||||
|
RootUID int `json:"rootUID"`
|
||||||
|
RootGID int `json:"rootGID"`
|
||||||
|
Stdin string `json:"containerdStdin"`
|
||||||
|
Stdout string `json:"containerdStdout"`
|
||||||
|
Stderr string `json:"containerdStderr"`
|
||||||
|
}
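For reference, the process.json written by newProcess (runtime/process.go above) for a plain init process would look roughly like the following; the stdio paths are invented for the example, and the exported fields of the embedded specs.Process are promoted to the top level alongside these keys:

	{
	  "exec": false,
	  "checkpoint": "",
	  "rootUID": 0,
	  "rootGID": 0,
	  "containerdStdin": "/dev/null",
	  "containerdStdout": "/run/containerd/abc/stdout",
	  "containerdStderr": "/dev/null"
	}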
|
||||||
|
|
||||||
|
type Stat struct {
|
||||||
|
// Timestamp is the time that the statistics were collected
|
||||||
|
Timestamp time.Time
|
||||||
|
// Data is the raw stats
|
||||||
|
// TODO: it is currently an interface because we don't know what type of exec drivers
|
||||||
|
// we will have or what the structure should look like at the moment, so the containers
|
||||||
|
// can return what they want and we could marshal to json or whatever.
|
||||||
|
Data interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Checkpoint struct {
|
||||||
|
// Created is the time that the checkpoint happened
|
||||||
|
Created time.Time `json:"created"`
|
||||||
|
// Name is the name of the checkpoint
|
||||||
|
Name string `json:"name"`
|
||||||
|
// Tcp checkpoints open tcp connections
|
||||||
|
Tcp bool `json:"tcp"`
|
||||||
|
// UnixSockets persists unix sockets in the checkpoint
|
||||||
|
UnixSockets bool `json:"unixSockets"`
|
||||||
|
// Shell persists tty sessions in the checkpoint
|
||||||
|
Shell bool `json:"shell"`
|
||||||
|
// Exit exits the container after the checkpoint is finished
|
||||||
|
Exit bool `json:"exit"`
|
||||||
}
|
}
|
||||||
|
|
|
@@ -3,40 +3,35 @@ package supervisor
|
||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/docker/containerd/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AddProcessEvent struct {
|
type AddProcessTask struct {
|
||||||
s *Supervisor
|
s *Supervisor
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: add this to worker for concurrent starts??? maybe not because of races where the container
|
// TODO: add this to worker for concurrent starts??? maybe not because of races where the container
|
||||||
// could be stopped and removed...
|
// could be stopped and removed...
|
||||||
func (h *AddProcessEvent) Handle(e *Event) error {
|
func (h *AddProcessTask) Handle(e *Task) error {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
ci, ok := h.s.containers[e.ID]
|
ci, ok := h.s.containers[e.ID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return ErrContainerNotFound
|
return ErrContainerNotFound
|
||||||
}
|
}
|
||||||
p, io, err := h.s.runtime.StartProcess(ci.container, *e.Process, e.Console)
|
process, err := ci.container.Exec(e.Pid, *e.ProcessSpec, runtime.NewStdio(e.Stdin, e.Stdout, e.Stderr))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if e.Pid, err = p.Pid(); err != nil {
|
if err := h.s.monitorProcess(process); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
h.s.processes[e.Pid] = &containerInfo{
|
|
||||||
container: ci.container,
|
|
||||||
}
|
|
||||||
l, err := h.s.copyIO(e.Stdin, e.Stdout, e.Stderr, io)
|
|
||||||
if err != nil {
|
|
||||||
// log the error but continue with the other commands
|
|
||||||
logrus.WithFields(logrus.Fields{
|
|
||||||
"error": err,
|
|
||||||
"id": e.ID,
|
|
||||||
}).Error("log stdio")
|
|
||||||
}
|
|
||||||
h.s.processes[e.Pid].copier = l
|
|
||||||
ExecProcessTimer.UpdateSince(start)
|
ExecProcessTimer.UpdateSince(start)
|
||||||
|
e.StartResponse <- StartResponse{}
|
||||||
|
h.s.notifySubscribers(Event{
|
||||||
|
Timestamp: time.Now(),
|
||||||
|
Type: "start-process",
|
||||||
|
Pid: e.Pid,
|
||||||
|
ID: e.ID,
|
||||||
|
})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,10 +1,10 @@
|
||||||
package supervisor
|
package supervisor
|
||||||
|
|
||||||
type CreateCheckpointEvent struct {
|
type CreateCheckpointTask struct {
|
||||||
s *Supervisor
|
s *Supervisor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *CreateCheckpointEvent) Handle(e *Event) error {
|
func (h *CreateCheckpointTask) Handle(e *Task) error {
|
||||||
i, ok := h.s.containers[e.ID]
|
i, ok := h.s.containers[e.ID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return ErrContainerNotFound
|
return ErrContainerNotFound
|
||||||
|
@@ -12,11 +12,11 @@ func (h *CreateCheckpointEvent) Handle(e *Event) error {
|
||||||
return i.container.Checkpoint(*e.Checkpoint)
|
return i.container.Checkpoint(*e.Checkpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteCheckpointEvent struct {
|
type DeleteCheckpointTask struct {
|
||||||
s *Supervisor
|
s *Supervisor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *DeleteCheckpointEvent) Handle(e *Event) error {
|
func (h *DeleteCheckpointTask) Handle(e *Task) error {
|
||||||
i, ok := h.s.containers[e.ID]
|
i, ok := h.s.containers[e.ID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return ErrContainerNotFound
|
return ErrContainerNotFound
|
||||||
|
|
|
@@ -1,30 +1,32 @@
|
||||||
package supervisor
|
package supervisor
|
||||||
|
|
||||||
import "time"
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
type StartEvent struct {
|
"github.com/docker/containerd/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StartTask struct {
|
||||||
s *Supervisor
|
s *Supervisor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *StartEvent) Handle(e *Event) error {
|
func (h *StartTask) Handle(e *Task) error {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
container, io, err := h.s.runtime.Create(e.ID, e.BundlePath, e.Console)
|
container, err := runtime.New(h.s.stateDir, e.ID, e.BundlePath, e.Labels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
h.s.containerGroup.Add(1)
|
|
||||||
h.s.containers[e.ID] = &containerInfo{
|
h.s.containers[e.ID] = &containerInfo{
|
||||||
container: container,
|
container: container,
|
||||||
}
|
}
|
||||||
ContainersCounter.Inc(1)
|
ContainersCounter.Inc(1)
|
||||||
task := &StartTask{
|
task := &startTask{
|
||||||
Err: e.Err,
|
Err: e.Err,
|
||||||
IO: io,
|
|
||||||
Container: container,
|
Container: container,
|
||||||
|
StartResponse: e.StartResponse,
|
||||||
Stdin: e.Stdin,
|
Stdin: e.Stdin,
|
||||||
Stdout: e.Stdout,
|
Stdout: e.Stdout,
|
||||||
Stderr: e.Stderr,
|
Stderr: e.Stderr,
|
||||||
StartResponse: e.StartResponse,
|
|
||||||
}
|
}
|
||||||
if e.Checkpoint != nil {
|
if e.Checkpoint != nil {
|
||||||
task.Checkpoint = e.Checkpoint.Name
|
task.Checkpoint = e.Checkpoint.Name
|
||||||
|
|
|
@@ -7,35 +7,30 @@ import (
|
||||||
"github.com/docker/containerd/runtime"
|
"github.com/docker/containerd/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DeleteEvent struct {
|
type DeleteTask struct {
|
||||||
s *Supervisor
|
s *Supervisor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *DeleteEvent) Handle(e *Event) error {
|
func (h *DeleteTask) Handle(e *Task) error {
|
||||||
if i, ok := h.s.containers[e.ID]; ok {
|
if i, ok := h.s.containers[e.ID]; ok {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
if err := h.deleteContainer(i.container); err != nil {
|
if err := h.deleteContainer(i.container); err != nil {
|
||||||
logrus.WithField("error", err).Error("containerd: deleting container")
|
logrus.WithField("error", err).Error("containerd: deleting container")
|
||||||
}
|
}
|
||||||
if i.copier != nil {
|
h.s.notifySubscribers(Event{
|
||||||
if err := i.copier.Close(); err != nil {
|
Type: "exit",
|
||||||
logrus.WithField("error", err).Error("containerd: close container copier")
|
Timestamp: time.Now(),
|
||||||
}
|
ID: e.ID,
|
||||||
}
|
Status: e.Status,
|
||||||
h.s.notifySubscribers(&Event{
|
Pid: e.Pid,
|
||||||
Type: ExitEventType,
|
|
||||||
ID: e.ID,
|
|
||||||
Status: e.Status,
|
|
||||||
Pid: e.Pid,
|
|
||||||
})
|
})
|
||||||
ContainersCounter.Dec(1)
|
ContainersCounter.Dec(1)
|
||||||
h.s.containerGroup.Done()
|
|
||||||
ContainerDeleteTimer.UpdateSince(start)
|
ContainerDeleteTimer.UpdateSince(start)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *DeleteEvent) deleteContainer(container runtime.Container) error {
|
func (h *DeleteTask) deleteContainer(container runtime.Container) error {
|
||||||
delete(h.s.containers, container.ID())
|
delete(h.s.containers, container.ID())
|
||||||
return container.Delete()
|
return container.Delete()
|
||||||
}
|
}
|
||||||
|
|
|
@@ -4,13 +4,13 @@ import "errors"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// External errors
|
// External errors
|
||||||
ErrEventChanNil = errors.New("containerd: event channel is nil")
|
ErrTaskChanNil = errors.New("containerd: task channel is nil")
|
||||||
ErrBundleNotFound = errors.New("containerd: bundle not found")
|
ErrBundleNotFound = errors.New("containerd: bundle not found")
|
||||||
ErrContainerNotFound = errors.New("containerd: container not found")
|
ErrContainerNotFound = errors.New("containerd: container not found")
|
||||||
ErrContainerExists = errors.New("containerd: container already exists")
|
ErrContainerExists = errors.New("containerd: container already exists")
|
||||||
ErrProcessNotFound = errors.New("containerd: process not found for container")
|
ErrProcessNotFound = errors.New("containerd: process not found for container")
|
||||||
ErrUnknownContainerStatus = errors.New("containerd: unknown container status ")
|
ErrUnknownContainerStatus = errors.New("containerd: unknown container status ")
|
||||||
ErrUnknownEvent = errors.New("containerd: unknown event type")
|
ErrUnknownTask = errors.New("containerd: unknown task type")
|
||||||
|
|
||||||
// Internal errors
|
// Internal errors
|
||||||
errShutdown = errors.New("containerd: supervisor is shutdown")
|
errShutdown = errors.New("containerd: supervisor is shutdown")
|
||||||
|
|
|
@@ -1,84 +0,0 @@
|
||||||
package supervisor
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/containerd/runtime"
|
|
||||||
"github.com/opencontainers/specs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type EventType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
ExecExitEventType EventType = "execExit"
|
|
||||||
ExitEventType EventType = "exit"
|
|
||||||
StartContainerEventType EventType = "startContainer"
|
|
||||||
DeleteEventType EventType = "deleteContainerEvent"
|
|
||||||
GetContainerEventType EventType = "getContainer"
|
|
||||||
SignalEventType EventType = "signal"
|
|
||||||
AddProcessEventType EventType = "addProcess"
|
|
||||||
UpdateContainerEventType EventType = "updateContainer"
|
|
||||||
CreateCheckpointEventType EventType = "createCheckpoint"
|
|
||||||
DeleteCheckpointEventType EventType = "deleteCheckpoint"
|
|
||||||
StatsEventType EventType = "events"
|
|
||||||
UnsubscribeStatsEventType EventType = "unsubscribeStats"
|
|
||||||
StopStatsEventType EventType = "stopStats"
|
|
||||||
OOMEventType EventType = "oom"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewEvent(t EventType) *Event {
|
|
||||||
return &Event{
|
|
||||||
Type: t,
|
|
||||||
Timestamp: time.Now(),
|
|
||||||
Err: make(chan error, 1),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type StartResponse struct {
|
|
||||||
Pid int
|
|
||||||
}
|
|
||||||
|
|
||||||
type Event struct {
|
|
||||||
Type EventType
|
|
||||||
Timestamp time.Time
|
|
||||||
ID string
|
|
||||||
BundlePath string
|
|
||||||
Stdout string
|
|
||||||
Stderr string
|
|
||||||
Stdin string
|
|
||||||
Console string
|
|
||||||
Pid int
|
|
||||||
Status int
|
|
||||||
Signal os.Signal
|
|
||||||
Process *specs.Process
|
|
||||||
State runtime.State
|
|
||||||
Containers []runtime.Container
|
|
||||||
Checkpoint *runtime.Checkpoint
|
|
||||||
Err chan error
|
|
||||||
StartResponse chan StartResponse
|
|
||||||
Stats chan interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Handler interface {
|
|
||||||
Handle(*Event) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type commonEvent struct {
|
|
||||||
data *Event
|
|
||||||
sv *Supervisor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *commonEvent) Handle() {
|
|
||||||
h, ok := e.sv.handlers[e.data.Type]
|
|
||||||
if !ok {
|
|
||||||
e.data.Err <- ErrUnknownEvent
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err := h.Handle(e.data)
|
|
||||||
if err != errDeferedResponse {
|
|
||||||
e.data.Err <- err
|
|
||||||
close(e.data.Err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -4,61 +4,62 @@ import (
  "time"

  "github.com/Sirupsen/logrus"
+ "github.com/docker/containerd/runtime"
)

-type ExitEvent struct {
+type ExitTask struct {
  s *Supervisor
}

-func (h *ExitEvent) Handle(e *Event) error {
+func (h *ExitTask) Handle(e *Task) error {
  start := time.Now()
- logrus.WithFields(logrus.Fields{"pid": e.Pid, "status": e.Status}).
-   Debug("containerd: process exited")
- // is it the child process of a container
- if info, ok := h.s.processes[e.Pid]; ok {
-   ne := NewEvent(ExecExitEventType)
-   ne.ID = info.container.ID()
-   ne.Pid = e.Pid
-   ne.Status = e.Status
-   h.s.SendEvent(ne)
-   return nil
- }
- // is it the main container's process
- container, err := h.s.getContainerForPid(e.Pid)
+ proc := e.Process
+ status, err := proc.ExitStatus()
  if err != nil {
-   if err != errNoContainerForPid {
-     logrus.WithField("error", err).Error("containerd: find containers main pid")
-   }
+   logrus.WithField("error", err).Error("containerd: get exit status")
+ }
+ logrus.WithFields(logrus.Fields{"pid": proc.ID(), "status": status}).Debug("containerd: process exited")

+ // if the process is the the init process of the container then
+ // fire a separate event for this process
+ if proc.ID() != runtime.InitProcessID {
+   ne := NewTask(ExecExitTaskType)
+   ne.ID = proc.Container().ID()
+   ne.Pid = proc.ID()
+   ne.Status = status
+   ne.Process = proc
+   h.s.SendTask(ne)

    return nil
  }
- container.SetExited(e.Status)
- ne := NewEvent(DeleteEventType)
+ container := proc.Container()
+ ne := NewTask(DeleteTaskType)
  ne.ID = container.ID()
- ne.Pid = e.Pid
- ne.Status = e.Status
- h.s.SendEvent(ne)
+ ne.Status = status
+ ne.Pid = proc.ID()
+ h.s.SendTask(ne)

- stopCollect := NewEvent(StopStatsEventType)
- stopCollect.ID = container.ID()
- h.s.SendEvent(stopCollect)
  ExitProcessTimer.UpdateSince(start)

  return nil
}

-type ExecExitEvent struct {
+type ExecExitTask struct {
  s *Supervisor
}

-func (h *ExecExitEvent) Handle(e *Event) error {
+func (h *ExecExitTask) Handle(e *Task) error {
+ container := e.Process.Container()
  // exec process: we remove this process without notifying the main event loop
- info := h.s.processes[e.Pid]
- if err := info.container.RemoveProcess(e.Pid); err != nil {
+ if err := container.RemoveProcess(e.Pid); err != nil {
    logrus.WithField("error", err).Error("containerd: find container for pid")
  }
- if err := info.copier.Close(); err != nil {
-   logrus.WithField("error", err).Error("containerd: close process IO")
- }
- delete(h.s.processes, e.Pid)
- h.s.notifySubscribers(e)
+ h.s.notifySubscribers(Event{
+   Timestamp: time.Now(),
+   ID:        e.ID,
+   Type:      "exit",
+   Pid:       e.Pid,
+   Status:    e.Status,
+ })
  return nil
}
@@ -1,10 +1,18 @@
package supervisor

-type GetContainersEvent struct {
+type GetContainersTask struct {
  s *Supervisor
}

-func (h *GetContainersEvent) Handle(e *Event) error {
+func (h *GetContainersTask) Handle(e *Task) error {
+ if e.ID != "" {
+   ci := h.s.containers[e.ID]
+   if ci == nil {
+     return ErrContainerNotFound
+   }
+   e.Containers = append(e.Containers, ci.container)
+   return nil
+ }
  for _, i := range h.s.containers {
    e.Containers = append(e.Containers, i.container)
  }
@@ -1,66 +0,0 @@ (deleted file)
package supervisor

import (
  "io"
  "os"
)

type ioConfig struct {
  StdoutPath string
  StderrPath string
  StdinPath  string

  Stdin  io.WriteCloser
  Stdout io.ReadCloser
  Stderr io.ReadCloser
}

func newCopier(i *ioConfig) (*copier, error) {
  l := &copier{
    config: i,
  }
  if i.StdinPath != "" {
    f, err := os.OpenFile(i.StdinPath, os.O_RDONLY, 0)
    if err != nil {
      return nil, err
    }
    l.closers = append(l.closers, f)
    go func() {
      io.Copy(i.Stdin, f)
      i.Stdin.Close()
    }()
  }
  if i.StdoutPath != "" {
    f, err := os.OpenFile(i.StdoutPath, os.O_RDWR, 0)
    if err != nil {
      return nil, err
    }
    l.closers = append(l.closers, f)
    go io.Copy(f, i.Stdout)
  }
  if i.StderrPath != "" {
    f, err := os.OpenFile(i.StderrPath, os.O_RDWR, 0)
    if err != nil {
      return nil, err
    }
    l.closers = append(l.closers, f)
    go io.Copy(f, i.Stderr)
  }
  return l, nil
}

type copier struct {
  config  *ioConfig
  closers []io.Closer
}

func (l *copier) Close() (err error) {
  for _, c := range append(l.closers, l.config.Stdin, l.config.Stdout, l.config.Stderr) {
    if c != nil {
      if cerr := c.Close(); err == nil {
        err = cerr
      }
    }
  }
  return err
}
@@ -3,15 +3,12 @@ package supervisor
import "github.com/cloudfoundry/gosigar"

type Machine struct {
- ID     string
  Cpus   int
  Memory int64
}

-func CollectMachineInformation(id string) (Machine, error) {
- m := Machine{
-   ID: id,
- }
+func CollectMachineInformation() (Machine, error) {
+ m := Machine{}
  cpu := sigar.CpuList{}
  if err := cpu.Get(); err != nil {
    return m, err
@@ -21,6 +18,6 @@ func CollectMachineInformation(id string) (Machine, error) {
  if err := mem.Get(); err != nil {
    return m, err
  }
- m.Memory = int64(mem.Total)
+ m.Memory = int64(mem.Total / 1024 / 1024)
  return m, nil
}
@@ -6,11 +6,13 @@ var (
  ContainerCreateTimer   = metrics.NewTimer()
  ContainerDeleteTimer   = metrics.NewTimer()
  ContainerStartTimer    = metrics.NewTimer()
+ ContainerStatsTimer    = metrics.NewTimer()
  ContainersCounter      = metrics.NewCounter()
  EventSubscriberCounter = metrics.NewCounter()
- EventsCounter          = metrics.NewCounter()
+ TasksCounter           = metrics.NewCounter()
  ExecProcessTimer       = metrics.NewTimer()
  ExitProcessTimer       = metrics.NewTimer()
+ EpollFdCounter         = metrics.NewCounter()
)

func Metrics() map[string]interface{} {
@@ -18,10 +20,12 @@ func Metrics() map[string]interface{} {
    "container-create-time": ContainerCreateTimer,
    "container-delete-time": ContainerDeleteTimer,
    "container-start-time":  ContainerStartTimer,
+   "container-stats-time":  ContainerStatsTimer,
    "containers":            ContainersCounter,
    "event-subscribers":     EventSubscriberCounter,
-   "events":                EventsCounter,
+   "tasks":                 TasksCounter,
    "exec-process-time":     ExecProcessTimer,
    "exit-process-time":     ExitProcessTimer,
+   "epoll-fds":             EpollFdCounter,
  }
}
supervisor/monitor.go (new file)
@@ -0,0 +1,88 @@
package supervisor

import (
  "sync"
  "syscall"

  "github.com/Sirupsen/logrus"
  "github.com/docker/containerd/runtime"
)

func NewMonitor() (*Monitor, error) {
  m := &Monitor{
    processes: make(map[int]runtime.Process),
    exits:     make(chan runtime.Process, 1024),
  }
  fd, err := syscall.EpollCreate1(0)
  if err != nil {
    return nil, err
  }
  m.epollFd = fd
  go m.start()
  return m, nil
}

type Monitor struct {
  m         sync.Mutex
  processes map[int]runtime.Process
  exits     chan runtime.Process
  epollFd   int
}

func (m *Monitor) Exits() chan runtime.Process {
  return m.exits
}

func (m *Monitor) Monitor(p runtime.Process) error {
  m.m.Lock()
  defer m.m.Unlock()
  fd := p.ExitFD()
  event := syscall.EpollEvent{
    Fd:     int32(fd),
    Events: syscall.EPOLLHUP,
  }
  if err := syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil {
    return err
  }
  EpollFdCounter.Inc(1)
  m.processes[fd] = p
  return nil
}

func (m *Monitor) Close() error {
  return syscall.Close(m.epollFd)
}

func (m *Monitor) start() {
  var events [128]syscall.EpollEvent
  for {
    n, err := syscall.EpollWait(m.epollFd, events[:], -1)
    if err != nil {
      if err == syscall.EINTR {
        continue
      }
      logrus.WithField("error", err).Fatal("containerd: epoll wait")
    }
    // process events
    for i := 0; i < n; i++ {
      if events[i].Events == syscall.EPOLLHUP {
        fd := int(events[i].Fd)
        m.m.Lock()
        proc := m.processes[fd]
        delete(m.processes, fd)
        if err = syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{
          Events: syscall.EPOLLHUP,
          Fd:     int32(fd),
        }); err != nil {
          logrus.WithField("error", err).Fatal("containerd: epoll remove fd")
        }
        EpollFdCounter.Dec(1)
        if err := proc.Close(); err != nil {
          logrus.WithField("error", err).Error("containerd: close process IO")
        }
        m.m.Unlock()
        m.exits <- proc
      }
    }
  }
}
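For reference, a minimal standalone sketch of the epoll/EPOLLHUP pattern the new Monitor relies on: register a file descriptor, block in EpollWait, and treat EPOLLHUP as "the other side has gone away". This is illustrative only (not part of this commit); it assumes Linux, uses only the standard syscall package, and substitutes a pipe for a process exit FD.

```go
package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	// A pipe stands in for a process exit FD: closing the write end
	// raises EPOLLHUP on the read end.
	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}
	r, w := p[0], p[1]

	epfd, err := syscall.EpollCreate1(0)
	if err != nil {
		panic(err)
	}
	ev := syscall.EpollEvent{Fd: int32(r), Events: syscall.EPOLLHUP}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, r, &ev); err != nil {
		panic(err)
	}

	go func() {
		time.Sleep(time.Second)
		syscall.Close(w) // simulate the monitored process exiting
	}()

	events := make([]syscall.EpollEvent, 8)
	n, err := syscall.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		if events[i].Events&syscall.EPOLLHUP != 0 {
			fmt.Println("fd", events[i].Fd, "hung up: peer exited")
		}
	}
}
```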
@@ -1,10 +1,10 @@
package supervisor

-type SignalEvent struct {
+type SignalTask struct {
  s *Supervisor
}

-func (h *SignalEvent) Handle(e *Event) error {
+func (h *SignalTask) Handle(e *Task) error {
  i, ok := h.s.containers[e.ID]
  if !ok {
    return ErrContainerNotFound
@@ -14,7 +14,7 @@ func (h *SignalEvent) Handle(e *Event) error {
    return err
  }
  for _, p := range processes {
-   if pid, err := p.Pid(); err == nil && pid == e.Pid {
+   if p.ID() == e.Pid {
      return p.Signal(e.Signal)
    }
  }
supervisor/sort_test.go (new file)
@@ -0,0 +1,73 @@
package supervisor

import (
  "os"
  "sort"
  "testing"

  "github.com/docker/containerd/runtime"
  "github.com/opencontainers/specs"
)

type testProcess struct {
  id string
}

func (p *testProcess) ID() string {
  return p.id
}

func (p *testProcess) CloseStdin() error {
  return nil
}

func (p *testProcess) Resize(w, h int) error {
  return nil
}

func (p *testProcess) Stdio() runtime.Stdio {
  return runtime.Stdio{}
}

func (p *testProcess) SystemPid() int {
  return -1
}

func (p *testProcess) ExitFD() int {
  return -1
}

func (p *testProcess) ExitStatus() (int, error) {
  return -1, nil
}

func (p *testProcess) Container() runtime.Container {
  return nil
}

func (p *testProcess) Spec() specs.Process {
  return specs.Process{}
}

func (p *testProcess) Signal(os.Signal) error {
  return nil
}

func (p *testProcess) Close() error {
  return nil
}

func TestSortProcesses(t *testing.T) {
  p := []runtime.Process{
    &testProcess{"ls"},
    &testProcess{"other"},
    &testProcess{"init"},
    &testProcess{"other2"},
  }
  s := &processSorter{p}
  sort.Sort(s)

  if id := p[len(p)-1].ID(); id != "init" {
    t.Fatalf("expected init but received %q", id)
  }
}
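The test above exercises processSorter (defined in supervisor.go later in this diff): a Less function that only reports "j is the init process" pushes init to the end of the slice, so its exit is handled last. The same ordering trick can be sketched standalone (illustrative only, not containerd code):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	ids := []string{"ls", "other", "init", "other2"}
	sort.SliceStable(ids, func(i, j int) bool {
		// i sorts before j whenever j is the init entry,
		// so init is the maximum and ends up last.
		return ids[j] == "init"
	})
	fmt.Println(ids) // [ls other other2 init]
}
```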
@@ -1,40 +1,27 @@
package supervisor

-type StatsEvent struct {
- s *Supervisor
-}
-
-type UnsubscribeStatsEvent struct {
- s *Supervisor
-}
-
-type StopStatsEvent struct {
- s *Supervisor
-}
-
-func (h *StatsEvent) Handle(e *Event) error {
- i, ok := h.s.containers[e.ID]
- if !ok {
-   return ErrContainerNotFound
- }
- e.Stats = h.s.statsCollector.collect(i.container)
- return nil
-}
-
-func (h *UnsubscribeStatsEvent) Handle(e *Event) error {
- i, ok := h.s.containers[e.ID]
- if !ok {
-   return ErrContainerNotFound
- }
- h.s.statsCollector.unsubscribe(i.container, e.Stats)
- return nil
-}
+import "time"

-func (h *StopStatsEvent) Handle(e *Event) error {
+type StatsTask struct {
+ s *Supervisor
+}
+
+func (h *StatsTask) Handle(e *Task) error {
+ start := time.Now()
  i, ok := h.s.containers[e.ID]
  if !ok {
    return ErrContainerNotFound
  }
- h.s.statsCollector.stopCollection(i.container)
- return nil
+ // TODO: use workers for this
+ go func() {
+   s, err := i.container.Stats()
+   if err != nil {
+     e.Err <- err
+     return
+   }
+   e.Err <- nil
+   e.Stat <- s
+   ContainerStatsTimer.UpdateSince(start)
+ }()
+ return errDeferedResponse
}
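For reference, a standalone sketch of the deferred-response pattern the new StatsTask handler uses: a handler that needs to do slow work returns a sentinel error, and the dispatcher then leaves the reply channel to the handler's own goroutine instead of answering immediately. Illustrative only, not containerd code; all names here are made up for the example.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errDeferred = errors.New("response will be sent later")

type request struct {
	Err  chan error  // reply channel, buffered so senders never block
	Data chan string // result channel used by deferred handlers
}

func dispatch(r *request, handle func(*request) error) {
	if err := handle(r); err != errDeferred {
		// Fast path: the handler finished, answer right away.
		r.Err <- err
		close(r.Err)
	}
	// Deferred path: the handler's goroutine now owns r.Err and r.Data.
}

func slowHandler(r *request) error {
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for collecting stats
		r.Err <- nil
		r.Data <- "collected stats"
	}()
	return errDeferred
}

func main() {
	r := &request{Err: make(chan error, 1), Data: make(chan string, 1)}
	dispatch(r, slowHandler)
	if err := <-r.Err; err != nil {
		panic(err)
	}
	fmt.Println(<-r.Data)
}
```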
@@ -1,240 +0,0 @@ (deleted file)
package supervisor

import (
  "bufio"
  "fmt"
  "os"
  "strconv"
  "strings"
  "sync"
  "time"

  "github.com/Sirupsen/logrus"
  "github.com/docker/containerd/api/grpc/types"
  "github.com/docker/containerd/runtime"
  "github.com/docker/docker/pkg/pubsub"
  "github.com/opencontainers/runc/libcontainer"
  "github.com/opencontainers/runc/libcontainer/cgroups"
  "github.com/opencontainers/runc/libcontainer/system"
)

func convertBlkioEntryToPb(b []cgroups.BlkioStatEntry) []*types.BlkioStatsEntry {
  var pbEs []*types.BlkioStatsEntry
  for _, e := range b {
    pbEs = append(pbEs, &types.BlkioStatsEntry{
      Major: e.Major,
      Minor: e.Minor,
      Op:    e.Op,
      Value: e.Value,
    })
  }
  return pbEs
}

func convertToPb(st *runtime.Stat) *types.Stats {
  pbSt := &types.Stats{
    Timestamp:   uint64(st.Timestamp.Unix()),
    CgroupStats: &types.CgroupStats{},
  }
  lcSt, ok := st.Data.(*libcontainer.Stats)
  if !ok {
    return pbSt
  }
  cpuSt := lcSt.CgroupStats.CpuStats
  pbSt.CgroupStats.CpuStats = &types.CpuStats{
    CpuUsage: &types.CpuUsage{
      TotalUsage:        cpuSt.CpuUsage.TotalUsage,
      PercpuUsage:       cpuSt.CpuUsage.PercpuUsage,
      UsageInKernelmode: cpuSt.CpuUsage.UsageInKernelmode,
      UsageInUsermode:   cpuSt.CpuUsage.UsageInUsermode,
    },
    ThrottlingData: &types.ThrottlingData{
      Periods:          cpuSt.ThrottlingData.Periods,
      ThrottledPeriods: cpuSt.ThrottlingData.ThrottledPeriods,
      ThrottledTime:    cpuSt.ThrottlingData.ThrottledTime,
    },
  }
  memSt := lcSt.CgroupStats.MemoryStats
  pbSt.CgroupStats.MemoryStats = &types.MemoryStats{
    Cache: memSt.Cache,
    Usage: &types.MemoryData{
      Usage:    memSt.Usage.Usage,
      MaxUsage: memSt.Usage.MaxUsage,
      Failcnt:  memSt.Usage.Failcnt,
    },
    SwapUsage: &types.MemoryData{
      Usage:    memSt.SwapUsage.Usage,
      MaxUsage: memSt.SwapUsage.MaxUsage,
      Failcnt:  memSt.SwapUsage.Failcnt,
    },
  }
  blkSt := lcSt.CgroupStats.BlkioStats
  pbSt.CgroupStats.BlkioStats = &types.BlkioStats{
    IoServiceBytesRecursive: convertBlkioEntryToPb(blkSt.IoServiceBytesRecursive),
    IoServicedRecursive:     convertBlkioEntryToPb(blkSt.IoServicedRecursive),
    IoQueuedRecursive:       convertBlkioEntryToPb(blkSt.IoQueuedRecursive),
    IoServiceTimeRecursive:  convertBlkioEntryToPb(blkSt.IoServiceTimeRecursive),
    IoWaitTimeRecursive:     convertBlkioEntryToPb(blkSt.IoWaitTimeRecursive),
    IoMergedRecursive:       convertBlkioEntryToPb(blkSt.IoMergedRecursive),
    IoTimeRecursive:         convertBlkioEntryToPb(blkSt.IoTimeRecursive),
    SectorsRecursive:        convertBlkioEntryToPb(blkSt.SectorsRecursive),
  }
  pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats)
  for k, st := range lcSt.CgroupStats.HugetlbStats {
    pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{
      Usage:    st.Usage,
      MaxUsage: st.MaxUsage,
      Failcnt:  st.Failcnt,
    }
  }
  return pbSt
}

type statsPair struct {
  ct  runtime.Container
  pub *pubsub.Publisher
}

func newStatsCollector(interval time.Duration) *statsCollector {
  s := &statsCollector{
    interval:            interval,
    clockTicksPerSecond: uint64(system.GetClockTicks()),
    bufReader:           bufio.NewReaderSize(nil, 128),
    publishers:          make(map[string]*statsPair),
  }
  go s.run()
  return s
}

// statsCollector manages and provides container resource stats
type statsCollector struct {
  m                   sync.Mutex
  supervisor          *Supervisor
  interval            time.Duration
  clockTicksPerSecond uint64
  publishers          map[string]*statsPair
  bufReader           *bufio.Reader
}

// collect registers the container with the collector and adds it to
// the event loop for collection on the specified interval returning
// a channel for the subscriber to receive on.
func (s *statsCollector) collect(c runtime.Container) chan interface{} {
  s.m.Lock()
  defer s.m.Unlock()
  publisher, exists := s.publishers[c.ID()]
  if !exists {
    pub := pubsub.NewPublisher(100*time.Millisecond, 1024)
    publisher = &statsPair{ct: c, pub: pub}
    s.publishers[c.ID()] = publisher
  }
  return publisher.pub.Subscribe()
}

// stopCollection closes the channels for all subscribers and removes
// the container from metrics collection.
func (s *statsCollector) stopCollection(c runtime.Container) {
  s.m.Lock()
  if publisher, exists := s.publishers[c.ID()]; exists {
    publisher.pub.Close()
    delete(s.publishers, c.ID())
  }
  s.m.Unlock()
}

// unsubscribe removes a specific subscriber from receiving updates for a container's stats.
func (s *statsCollector) unsubscribe(c runtime.Container, ch chan interface{}) {
  s.m.Lock()
  publisher := s.publishers[c.ID()]
  if publisher != nil {
    publisher.pub.Evict(ch)
    if publisher.pub.Len() == 0 {
      delete(s.publishers, c.ID())
    }
  }
  s.m.Unlock()
}

func (s *statsCollector) run() {
  type publishersPair struct {
    container runtime.Container
    publisher *pubsub.Publisher
  }
  // we cannot determine the capacity here.
  // it will grow enough in first iteration
  var pairs []*statsPair

  for range time.Tick(s.interval) {
    // it does not make sense in the first iteration,
    // but saves allocations in further iterations
    pairs = pairs[:0]

    s.m.Lock()
    for _, publisher := range s.publishers {
      // copy pointers here to release the lock ASAP
      pairs = append(pairs, publisher)
    }
    s.m.Unlock()
    if len(pairs) == 0 {
      continue
    }

    for _, pair := range pairs {
      stats, err := pair.ct.Stats()
      if err != nil {
        logrus.Errorf("Error getting stats for container ID %s", pair.ct.ID())
        continue
      }

      pair.pub.Publish(convertToPb(stats))
    }
  }
}

const nanoSecondsPerSecond = 1e9

// getSystemCPUUsage returns the host system's cpu usage in
// nanoseconds. An error is returned if the format of the underlying
// file does not match.
//
// Uses /proc/stat defined by POSIX. Looks for the cpu
// statistics line and then sums up the first seven fields
// provided. See `man 5 proc` for details on specific field
// information.
func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
  var line string
  f, err := os.Open("/proc/stat")
  if err != nil {
    return 0, err
  }
  defer func() {
    s.bufReader.Reset(nil)
    f.Close()
  }()
  s.bufReader.Reset(f)
  err = nil
  for err == nil {
    line, err = s.bufReader.ReadString('\n')
    if err != nil {
      break
    }
    parts := strings.Fields(line)
    switch parts[0] {
    case "cpu":
      if len(parts) < 8 {
        return 0, fmt.Errorf("bad format of cpu stats")
      }
      var totalClockTicks uint64
      for _, i := range parts[1:8] {
        v, err := strconv.ParseUint(i, 10, 64)
        if err != nil {
          return 0, fmt.Errorf("error parsing cpu stats")
        }
        totalClockTicks += v
      }
      return (totalClockTicks * nanoSecondsPerSecond) /
        s.clockTicksPerSecond, nil
    }
  }
  return 0, fmt.Errorf("bad stats format")
}
@@ -1,144 +1,156 @@
package supervisor

import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
  "os"
- "os/signal"
  "path/filepath"
+ "sort"
  "sync"
- "syscall"
  "time"

  "github.com/Sirupsen/logrus"
  "github.com/docker/containerd/chanotify"
  "github.com/docker/containerd/eventloop"
  "github.com/docker/containerd/runtime"
- "github.com/opencontainers/runc/libcontainer"
)

const (
- statsInterval     = 1 * time.Second
  defaultBufferSize = 2048 // size of queue in eventloop
)

// New returns an initialized Process supervisor.
-func New(id, stateDir string, tasks chan *StartTask, oom bool) (*Supervisor, error) {
+func New(stateDir string, oom bool) (*Supervisor, error) {
+ tasks := make(chan *startTask, 10)
  if err := os.MkdirAll(stateDir, 0755); err != nil {
    return nil, err
  }
- // register counters
- r, err := newRuntime(filepath.Join(stateDir, id))
+ machine, err := CollectMachineInformation()
  if err != nil {
    return nil, err
  }
- machine, err := CollectMachineInformation(id)
+ monitor, err := NewMonitor()
  if err != nil {
    return nil, err
  }
  s := &Supervisor{
-   stateDir:       stateDir,
-   containers:     make(map[string]*containerInfo),
-   processes:      make(map[int]*containerInfo),
-   runtime:        r,
-   tasks:          tasks,
-   machine:        machine,
-   subscribers:    make(map[chan *Event]struct{}),
-   statsCollector: newStatsCollector(statsInterval),
-   el:             eventloop.NewChanLoop(defaultBufferSize),
+   stateDir:    stateDir,
+   containers:  make(map[string]*containerInfo),
+   tasks:       tasks,
+   machine:     machine,
+   subscribers: make(map[chan Event]struct{}),
+   el:          eventloop.NewChanLoop(defaultBufferSize),
+   monitor:     monitor,
+ }
+ if err := setupEventLog(s); err != nil {
+   return nil, err
  }
  if oom {
    s.notifier = chanotify.New()
    go func() {
      for id := range s.notifier.Chan() {
-       e := NewEvent(OOMEventType)
+       e := NewTask(OOMTaskType)
        e.ID = id.(string)
-       s.SendEvent(e)
+       s.SendTask(e)
      }
    }()
  }
  // register default event handlers
- s.handlers = map[EventType]Handler{
-   ExecExitEventType:         &ExecExitEvent{s},
-   ExitEventType:             &ExitEvent{s},
-   StartContainerEventType:   &StartEvent{s},
-   DeleteEventType:           &DeleteEvent{s},
-   GetContainerEventType:     &GetContainersEvent{s},
-   SignalEventType:           &SignalEvent{s},
-   AddProcessEventType:       &AddProcessEvent{s},
-   UpdateContainerEventType:  &UpdateEvent{s},
-   CreateCheckpointEventType: &CreateCheckpointEvent{s},
-   DeleteCheckpointEventType: &DeleteCheckpointEvent{s},
-   StatsEventType:            &StatsEvent{s},
-   UnsubscribeStatsEventType: &UnsubscribeStatsEvent{s},
-   StopStatsEventType:        &StopStatsEvent{s},
+ s.handlers = map[TaskType]Handler{
+   ExecExitTaskType:         &ExecExitTask{s},
+   ExitTaskType:             &ExitTask{s},
+   StartContainerTaskType:   &StartTask{s},
+   DeleteTaskType:           &DeleteTask{s},
+   GetContainerTaskType:     &GetContainersTask{s},
+   SignalTaskType:           &SignalTask{s},
+   AddProcessTaskType:       &AddProcessTask{s},
+   UpdateContainerTaskType:  &UpdateTask{s},
+   CreateCheckpointTaskType: &CreateCheckpointTask{s},
+   DeleteCheckpointTaskType: &DeleteCheckpointTask{s},
+   StatsTaskType:            &StatsTask{s},
+   UpdateProcessTaskType:    &UpdateProcessTask{s},
+ }
+ go s.exitHandler()
+ if err := s.restore(); err != nil {
+   return nil, err
  }
- // start the container workers for concurrent container starts
  return s, nil
}

type containerInfo struct {
  container runtime.Container
- copier    *copier
}

+func setupEventLog(s *Supervisor) error {
+ if err := readEventLog(s); err != nil {
+   return err
+ }
+ logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events")
+ events := s.Events(time.Time{})
+ f, err := os.OpenFile(filepath.Join(s.stateDir, "events.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)
+ if err != nil {
+   return err
+ }
+ enc := json.NewEncoder(f)
+ go func() {
+   for e := range events {
+     s.eventLog = append(s.eventLog, e)
+     if err := enc.Encode(e); err != nil {
+       logrus.WithField("error", err).Error("containerd: write event to journal")
+     }
+   }
+ }()
+ return nil
+}
+
+func readEventLog(s *Supervisor) error {
+ f, err := os.Open(filepath.Join(s.stateDir, "events.log"))
+ if err != nil {
+   if os.IsNotExist(err) {
+     return nil
+   }
+   return err
+ }
+ defer f.Close()
+ dec := json.NewDecoder(f)
+ for {
+   var e Event
+   if err := dec.Decode(&e); err != nil {
+     if err == io.EOF {
+       return nil
+     }
+     return err
+   }
+   s.eventLog = append(s.eventLog, e)
+ }
+ return nil
+}

type Supervisor struct {
  // stateDir is the directory on the system to store container runtime state information.
  stateDir   string
  containers map[string]*containerInfo
- processes  map[int]*containerInfo
- handlers   map[EventType]Handler
- runtime    runtime.Runtime
- events     chan *Event
- tasks      chan *StartTask
+ handlers   map[TaskType]Handler
+ events     chan *Task
+ tasks      chan *startTask
  // we need a lock around the subscribers map only because additions and deletions from
  // the map are via the API so we cannot really control the concurrency
  subscriberLock sync.RWMutex
- subscribers    map[chan *Event]struct{}
+ subscribers    map[chan Event]struct{}
  machine        Machine
- containerGroup sync.WaitGroup
- statsCollector *statsCollector
  notifier       *chanotify.Notifier
  el             eventloop.EventLoop
+ monitor        *Monitor
+ eventLog       []Event
}

// Stop closes all tasks and sends a SIGTERM to each container's pid1 then waits for they to
// terminate. After it has handled all the SIGCHILD events it will close the signals chan
// and exit. Stop is a non-blocking call and will return after the containers have been signaled
-func (s *Supervisor) Stop(sig chan os.Signal) {
+func (s *Supervisor) Stop() {
  // Close the tasks channel so that no new containers get started
  close(s.tasks)
- // send a SIGTERM to all containers
- for id, i := range s.containers {
-   c := i.container
-   logrus.WithField("id", id).Debug("sending TERM to container processes")
-   procs, err := c.Processes()
-   if err != nil {
-     logrus.WithField("id", id).Warn("get container processes")
-     continue
-   }
-   if len(procs) == 0 {
-     continue
-   }
-   mainProc := procs[0]
-   if err := mainProc.Signal(syscall.SIGTERM); err != nil {
-     pid, _ := mainProc.Pid()
-     logrus.WithFields(logrus.Fields{
-       "id":    id,
-       "pid":   pid,
-       "error": err,
-     }).Error("send SIGTERM to process")
-   }
- }
- go func() {
-   logrus.Debug("waiting for containers to exit")
-   s.containerGroup.Wait()
-   logrus.Debug("all containers exited")
-   if s.notifier != nil {
-     s.notifier.Close()
-   }
-   // stop receiving signals and close the channel
-   signal.Stop(sig)
-   close(sig)
- }()
}

// Close closes any open files in the supervisor but expects that Stop has been
@@ -147,19 +159,35 @@ func (s *Supervisor) Close() error {
  return nil
}

+type Event struct {
+ ID        string    `json:"id"`
+ Type      string    `json:"type"`
+ Timestamp time.Time `json:"timestamp"`
+ Pid       string    `json:"pid,omitempty"`
+ Status    int       `json:"status,omitempty"`
+}
+
// Events returns an event channel that external consumers can use to receive updates
// on container events
-func (s *Supervisor) Events() chan *Event {
+func (s *Supervisor) Events(from time.Time) chan Event {
  s.subscriberLock.Lock()
  defer s.subscriberLock.Unlock()
- c := make(chan *Event, defaultBufferSize)
+ c := make(chan Event, defaultBufferSize)
  EventSubscriberCounter.Inc(1)
  s.subscribers[c] = struct{}{}
+ if !from.IsZero() {
+   // replay old event
+   for _, e := range s.eventLog {
+     if e.Timestamp.After(from) {
+       c <- e
+     }
+   }
+ }
  return c
}

// Unsubscribe removes the provided channel from receiving any more events
-func (s *Supervisor) Unsubscribe(sub chan *Event) {
+func (s *Supervisor) Unsubscribe(sub chan Event) {
  s.subscriberLock.Lock()
  defer s.subscriberLock.Unlock()
  delete(s.subscribers, sub)
@@ -169,7 +197,7 @@ func (s *Supervisor) Unsubscribe(sub chan *Event) {

// notifySubscribers will send the provided event to the external subscribers
// of the events channel
-func (s *Supervisor) notifySubscribers(e *Event) {
+func (s *Supervisor) notifySubscribers(e Event) {
  s.subscriberLock.RLock()
  defer s.subscriberLock.RUnlock()
  for sub := range s.subscribers {
@@ -177,7 +205,7 @@ func (s *Supervisor) notifySubscribers(e *Event) {
    select {
    case sub <- e:
    default:
-     logrus.WithField("event", e.Type).Warn("event not sent to subscriber")
+     logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber")
    }
  }
}
@@ -189,10 +217,7 @@ ...
// therefore it is save to do operations in the handlers that modify state of the system or
// state of the Supervisor
func (s *Supervisor) Start() error {
- logrus.WithFields(logrus.Fields{
-   "runtime":  s.runtime.Type(),
-   "stateDir": s.stateDir,
- }).Debug("Supervisor started")
+ logrus.WithField("stateDir", s.stateDir).Debug("containerd: supervisor running")
  return s.el.Start()
}

@@ -202,45 +227,83 @@ func (s *Supervisor) Machine() Machine {
  return s.machine
}

-// getContainerForPid returns the container where the provided pid is the pid1 or main
-// process in the container
-func (s *Supervisor) getContainerForPid(pid int) (runtime.Container, error) {
- for _, i := range s.containers {
-   container := i.container
-   cpid, err := container.Pid()
-   if err != nil {
-     if lerr, ok := err.(libcontainer.Error); ok {
-       if lerr.Code() == libcontainer.ProcessNotExecuted {
-         continue
-       }
-     }
-     logrus.WithField("error", err).Error("containerd: get container pid")
-   }
-   if pid == cpid {
-     return container, nil
-   }
- }
- return nil, errNoContainerForPid
-}
-
-// SendEvent sends the provided event the the supervisors main event loop
-func (s *Supervisor) SendEvent(evt *Event) {
- EventsCounter.Inc(1)
- s.el.Send(&commonEvent{data: evt, sv: s})
-}
-
-func (s *Supervisor) copyIO(stdin, stdout, stderr string, i *runtime.IO) (*copier, error) {
- config := &ioConfig{
-   Stdin:      i.Stdin,
-   Stdout:     i.Stdout,
-   Stderr:     i.Stderr,
-   StdoutPath: stdout,
-   StderrPath: stderr,
-   StdinPath:  stdin,
- }
- l, err := newCopier(config)
- if err != nil {
-   return nil, err
- }
- return l, nil
-}
+// SendTask sends the provided event the the supervisors main event loop
+func (s *Supervisor) SendTask(evt *Task) {
+ TasksCounter.Inc(1)
+ s.el.Send(&commonTask{data: evt, sv: s})
+}
+
+func (s *Supervisor) exitHandler() {
+ for p := range s.monitor.Exits() {
+   e := NewTask(ExitTaskType)
+   e.Process = p
+   s.SendTask(e)
+ }
+}
+
+func (s *Supervisor) monitorProcess(p runtime.Process) error {
+ return s.monitor.Monitor(p)
+}
+
+func (s *Supervisor) restore() error {
+ dirs, err := ioutil.ReadDir(s.stateDir)
+ if err != nil {
+   return err
+ }
+ for _, d := range dirs {
+   if !d.IsDir() {
+     continue
+   }
+   id := d.Name()
+   container, err := runtime.Load(s.stateDir, id)
+   if err != nil {
+     return err
+   }
+   processes, err := container.Processes()
+   if err != nil {
+     return err
+   }
+   ContainersCounter.Inc(1)
+   s.containers[id] = &containerInfo{
+     container: container,
+   }
+   logrus.WithField("id", id).Debug("containerd: container restored")
+   var exitedProcesses []runtime.Process
+   for _, p := range processes {
+     if _, err := p.ExitStatus(); err == nil {
+       exitedProcesses = append(exitedProcesses, p)
+     } else {
+       if err := s.monitorProcess(p); err != nil {
+         return err
+       }
+     }
+   }
+   if len(exitedProcesses) > 0 {
+     // sort processes so that init is fired last because that is how the kernel sends the
+     // exit events
+     sort.Sort(&processSorter{exitedProcesses})
+     for _, p := range exitedProcesses {
+       e := NewTask(ExitTaskType)
+       e.Process = p
+       s.SendTask(e)
+     }
+   }
+ }
+ return nil
+}
+
+type processSorter struct {
+ processes []runtime.Process
+}
+
+func (s *processSorter) Len() int {
+ return len(s.processes)
+}
+
+func (s *processSorter) Swap(i, j int) {
+ s.processes[i], s.processes[j] = s.processes[j], s.processes[i]
+}
+
+func (s *processSorter) Less(i, j int) bool {
+ return s.processes[j].ID() == "init"
+}
@@ -1,12 +0,0 @@ (deleted file)
// +build libcontainer

package supervisor

import (
  "github.com/docker/containerd/linux"
  "github.com/docker/containerd/runtime"
)

func newRuntime(stateDir string) (runtime.Runtime, error) {
  return linux.NewRuntime(stateDir)
}

@@ -1,12 +0,0 @@ (deleted file)
// +build runc

package supervisor

import (
  "github.com/docker/containerd/runc"
  "github.com/docker/containerd/runtime"
)

func newRuntime(stateDir string) (runtime.Runtime, error) {
  return runc.NewRuntime(stateDir)
}

@@ -1,13 +0,0 @@ (deleted file)
// +build !libcontainer,!runc

package supervisor

import (
  "errors"

  "github.com/docker/containerd/runtime"
)

func newRuntime(stateDir string) (runtime.Runtime, error) {
  return nil, errors.New("unsupported platform")
}
supervisor/task.go (new file)
@@ -0,0 +1,89 @@
package supervisor

import (
  "os"
  "time"

  "github.com/docker/containerd/runtime"
  "github.com/opencontainers/specs"
)

type TaskType string

const (
  ExecExitTaskType         TaskType = "execExit"
  ExitTaskType             TaskType = "exit"
  StartContainerTaskType   TaskType = "startContainer"
  DeleteTaskType           TaskType = "deleteContainerEvent"
  GetContainerTaskType     TaskType = "getContainer"
  SignalTaskType           TaskType = "signal"
  AddProcessTaskType       TaskType = "addProcess"
  UpdateContainerTaskType  TaskType = "updateContainer"
  UpdateProcessTaskType    TaskType = "updateProcess"
  CreateCheckpointTaskType TaskType = "createCheckpoint"
  DeleteCheckpointTaskType TaskType = "deleteCheckpoint"
  StatsTaskType            TaskType = "events"
  OOMTaskType              TaskType = "oom"
)

func NewTask(t TaskType) *Task {
  return &Task{
    Type:      t,
    Timestamp: time.Now(),
    Err:       make(chan error, 1),
  }
}

type StartResponse struct {
  Container runtime.Container
}

type Task struct {
  Type          TaskType
  Timestamp     time.Time
  ID            string
  BundlePath    string
  Stdout        string
  Stderr        string
  Stdin         string
  Console       string
  Pid           string
  Status        int
  Signal        os.Signal
  Process       runtime.Process
  State         runtime.State
  ProcessSpec   *specs.Process
  Containers    []runtime.Container
  Checkpoint    *runtime.Checkpoint
  Err           chan error
  StartResponse chan StartResponse
  Stat          chan *runtime.Stat
  CloseStdin    bool
  ResizeTty     bool
  Width         int
  Height        int
  Labels        []string
}

type Handler interface {
  Handle(*Task) error
}

type commonTask struct {
  data *Task
  sv   *Supervisor
}

func (e *commonTask) Handle() {
  h, ok := e.sv.handlers[e.data.Type]
  if !ok {
    e.data.Err <- ErrUnknownTask
    return
  }
  err := h.Handle(e.data)
  if err != errDeferedResponse {
    e.data.Err <- err
    close(e.data.Err)
    return
  }
}
@@ -1,12 +1,16 @@
package supervisor

-import "github.com/docker/containerd/runtime"
+import (
+ "time"
+
+ "github.com/docker/containerd/runtime"
+)

-type UpdateEvent struct {
+type UpdateTask struct {
  s *Supervisor
}

-func (h *UpdateEvent) Handle(e *Event) error {
+func (h *UpdateTask) Handle(e *Task) error {
  i, ok := h.s.containers[e.ID]
  if !ok {
    return ErrContainerNotFound
@@ -18,24 +22,59 @@ func (h *UpdateEvent) Handle(e *Event) error {
    if err := container.Resume(); err != nil {
      return ErrUnknownContainerStatus
    }
+   h.s.notifySubscribers(Event{
+     ID:        e.ID,
+     Type:      "resume",
+     Timestamp: time.Now(),
+   })
  case runtime.Paused:
    if err := container.Pause(); err != nil {
      return ErrUnknownContainerStatus
    }
+   h.s.notifySubscribers(Event{
+     ID:        e.ID,
+     Type:      "pause",
+     Timestamp: time.Now(),
+   })
  default:
    return ErrUnknownContainerStatus
  }
  }
- if e.Signal != nil {
-   // signal the pid1/main process of the container
-   processes, err := container.Processes()
-   if err != nil {
-     return err
-   }
-   if len(processes) == 0 {
-     return ErrProcessNotFound
-   }
-   return processes[0].Signal(e.Signal)
- }
  return nil
}
+
+type UpdateProcessTask struct {
+ s *Supervisor
+}
+
+func (h *UpdateProcessTask) Handle(e *Task) error {
+ i, ok := h.s.containers[e.ID]
+ if !ok {
+   return ErrContainerNotFound
+ }
+ processes, err := i.container.Processes()
+ if err != nil {
+   return err
+ }
+ var process runtime.Process
+ for _, p := range processes {
+   if p.ID() == e.Pid {
+     process = p
+     break
+   }
+ }
+ if process == nil {
+   return ErrProcessNotFound
+ }
+ if e.CloseStdin {
+   if err := process.CloseStdin(); err != nil {
+     return err
+   }
+ }
+ if e.Width > 0 || e.Height > 0 {
+   if err := process.Resize(e.Width, e.Height); err != nil {
+     return err
+   }
+ }
+ return nil
+}
@@ -12,10 +12,9 @@ type Worker interface {
  Start()
}

-type StartTask struct {
+type startTask struct {
  Container  runtime.Container
  Checkpoint string
- IO         *runtime.IO
  Stdin      string
  Stdout     string
  Stderr     string
@@ -39,48 +38,36 @@ func (w *worker) Start() {
  defer w.wg.Done()
  for t := range w.s.tasks {
    started := time.Now()
-   l, err := w.s.copyIO(t.Stdin, t.Stdout, t.Stderr, t.IO)
+   process, err := t.Container.Start(t.Checkpoint, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr))
    if err != nil {
-     evt := NewEvent(DeleteEventType)
+     evt := NewTask(DeleteTaskType)
      evt.ID = t.Container.ID()
-     w.s.SendEvent(evt)
+     w.s.SendTask(evt)
      t.Err <- err
      continue
    }
-   w.s.containers[t.Container.ID()].copier = l
-   if t.Checkpoint != "" {
-     if err := t.Container.Restore(t.Checkpoint); err != nil {
-       evt := NewEvent(DeleteEventType)
-       evt.ID = t.Container.ID()
-       w.s.SendEvent(evt)
-       t.Err <- err
-       continue
-     }
-   } else {
-     if err := t.Container.Start(); err != nil {
-       evt := NewEvent(DeleteEventType)
-       evt.ID = t.Container.ID()
-       w.s.SendEvent(evt)
-       t.Err <- err
-       continue
-     }
-   }
-   pid, err := t.Container.Pid()
-   if err != nil {
-     logrus.WithField("error", err).Error("containerd: get container main pid")
-   }
-   if w.s.notifier != nil {
-     n, err := t.Container.OOM()
-     if err != nil {
-       logrus.WithField("error", err).Error("containerd: notify OOM events")
-     } else {
-       w.s.notifier.Add(t.Container.ID(), n)
-     }
+   /*
+     if w.s.notifier != nil {
+       n, err := t.Container.OOM()
+       if err != nil {
+         logrus.WithField("error", err).Error("containerd: notify OOM events")
+       } else {
+         w.s.notifier.Add(n, t.Container.ID())
+       }
+     }
+   */
+   if err := w.s.monitorProcess(process); err != nil {
+     logrus.WithField("error", err).Error("containerd: add process to monitor")
    }
    ContainerStartTimer.UpdateSince(started)
    t.Err <- nil
    t.StartResponse <- StartResponse{
-     Pid: pid,
+     Container: t.Container,
    }
+   w.s.notifySubscribers(Event{
+     Timestamp: time.Now(),
+     ID:        t.Container.ID(),
+     Type:      "start-container",
+   })
  }
}
util/reaper.go (new file)
@@ -0,0 +1,38 @@
package util

import (
  "syscall"

  "github.com/opencontainers/runc/libcontainer/utils"
)

// Exit is the wait4 information from an exited process
type Exit struct {
  Pid    int
  Status int
}

// Reap reaps all child processes for the calling process and returns their
// exit information
func Reap() (exits []Exit, err error) {
  var (
    ws  syscall.WaitStatus
    rus syscall.Rusage
  )
  for {
    pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
    if err != nil {
      if err == syscall.ECHILD {
        return exits, nil
      }
      return exits, err
    }
    if pid <= 0 {
      return exits, nil
    }
    exits = append(exits, Exit{
      Pid:    pid,
      Status: utils.ExitStatus(ws),
    })
  }
}
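For reference, a standalone sketch of how a Reap-style loop is typically driven: block on SIGCHLD, then call wait4 with WNOHANG until no more exited children are pending. Illustrative only (not part of this commit); it assumes Linux/Unix and uses only the standard library rather than the runc helper above.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 32)
	signal.Notify(sigs, syscall.SIGCHLD)

	// Start a child so there is something to reap.
	if err := exec.Command("true").Start(); err != nil {
		panic(err)
	}

	<-sigs // a child changed state
	var (
		ws  syscall.WaitStatus
		rus syscall.Rusage
	)
	for {
		pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
		if err == syscall.ECHILD {
			return // no children left at all
		}
		if err != nil {
			panic(err)
		}
		if pid <= 0 {
			return // children exist but none have exited yet
		}
		fmt.Printf("reaped pid %d, exit status %d\n", pid, ws.ExitStatus())
	}
}
```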
vendor/src/github.com/opencontainers/runc/.gitignore (new file, vendored)
@@ -0,0 +1,2 @@
vendor/pkg
runc
vendor/src/github.com/opencontainers/runc/CONTRIBUTING.md (new file, vendored)
@@ -0,0 +1,117 @@
## Contribution Guidelines

### Pull requests are always welcome

We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.

If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.

We're trying very hard to keep runc lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* runc.


### Conventions

Fork the repo and make changes on your fork in a feature branch:

- If it's a bugfix branch, name it XXX-something where XXX is the number of the
  issue
- If it's a feature branch, create an enhancement issue to announce your
  intentions, and name it XXX-something where XXX is the number of the issue.

Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.

Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.

Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plugins that do this automatically.

Pull requests descriptions should be as clear as possible and include a
reference to all the issues that they address.

Pull requests must not contain commits from other users or branches.

Commit messages must start with a capitalized and short summary (max. 50
chars) written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.

Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.

Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.

Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.

### Sign your work

The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
|
||||||
|
work with modifications, whether created in whole or in part
|
||||||
|
by me, under the same open source license (unless I am
|
||||||
|
permitted to submit under a different license), as indicated
|
||||||
|
in the file; or
|
||||||
|
|
||||||
|
(c) The contribution was provided directly to me by some other
|
||||||
|
person who certified (a), (b) or (c) and I have not modified
|
||||||
|
it.
|
||||||
|
|
||||||
|
(d) I understand and agree that this project and the contribution
|
||||||
|
are public and that a record of the contribution (including all
|
||||||
|
personal information I submit with it, including my sign-off) is
|
||||||
|
maintained indefinitely and may be redistributed consistent with
|
||||||
|
this project or the open source license(s) involved.
|
||||||
|
```
|
||||||
|
|
||||||
|
then you just add a line to every git commit message:
|
||||||
|
|
||||||
|
Signed-off-by: Joe Smith <joe@gmail.com>
|
||||||
|
|
||||||
|
using your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||||
|
|
||||||
|
You can add the sign off when creating the git commit via `git commit -s`.
|
7	vendor/src/github.com/opencontainers/runc/MAINTAINERS	vendored	Normal file

@@ -0,0 +1,7 @@
Michael Crosby <michael@docker.com> (@crosbymichael)
Rohit Jnagal <jnagal@google.com> (@rjnagal)
Victor Marmol <vmarmol@google.com> (@vmarmol)
Mrunal Patel <mpatel@redhat.com> (@mrunalp)
Alexander Morozov <lk4d4@docker.com> (@LK4D4)
Daniel, Dao Quang Minh <dqminh89@gmail.com> (@dqminh)
Andrey Vagin <avagin@virtuozzo.com> (@avagin)
120
vendor/src/github.com/opencontainers/runc/MAINTAINERS_GUIDE.md
vendored
Normal file
120
vendor/src/github.com/opencontainers/runc/MAINTAINERS_GUIDE.md
vendored
Normal file
|
@ -0,0 +1,120 @@
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
Dear maintainer. Thank you for investing the time and energy to help
|
||||||
|
make runc as useful as possible. Maintaining a project is difficult,
|
||||||
|
sometimes unrewarding work. Sure, you will get to contribute cool
|
||||||
|
features to the project. But most of your time will be spent reviewing,
|
||||||
|
cleaning up, documenting, answering questions, justifying design
|
||||||
|
decisions - while everyone has all the fun! But remember - the quality
|
||||||
|
of the maintainers work is what distinguishes the good projects from the
|
||||||
|
great. So please be proud of your work, even the unglamourous parts,
|
||||||
|
and encourage a culture of appreciation and respect for *every* aspect
|
||||||
|
of improving the project - not just the hot new features.
|
||||||
|
|
||||||
|
This document is a manual for maintainers old and new. It explains what
|
||||||
|
is expected of maintainers, how they should work, and what tools are
|
||||||
|
available to them.
|
||||||
|
|
||||||
|
This is a living document - if you see something out of date or missing,
|
||||||
|
speak up!
|
||||||
|
|
||||||
|
## What are a maintainer's responsibility?
|
||||||
|
|
||||||
|
It is every maintainer's responsibility to:
|
||||||
|
|
||||||
|
* 1) Expose a clear roadmap for improving their component.
|
||||||
|
* 2) Deliver prompt feedback and decisions on pull requests.
|
||||||
|
* 3) Be available to anyone with questions, bug reports, criticism etc.
|
||||||
|
on their component. This includes IRC and GitHub issues and pull requests.
|
||||||
|
* 4) Make sure their component respects the philosophy, design and
|
||||||
|
roadmap of the project.
|
||||||
|
|
||||||
|
## How are decisions made?
|
||||||
|
|
||||||
|
Short answer: with pull requests to the runc repository.
|
||||||
|
|
||||||
|
runc is an open-source project with an open design philosophy. This
|
||||||
|
means that the repository is the source of truth for EVERY aspect of the
|
||||||
|
project, including its philosophy, design, roadmap and APIs. *If it's
|
||||||
|
part of the project, it's in the repo. It's in the repo, it's part of
|
||||||
|
the project.*
|
||||||
|
|
||||||
|
As a result, all decisions can be expressed as changes to the
|
||||||
|
repository. An implementation change is a change to the source code. An
|
||||||
|
API change is a change to the API specification. A philosophy change is
|
||||||
|
a change to the philosophy manifesto. And so on.
|
||||||
|
|
||||||
|
All decisions affecting runc, big and small, follow the same 3 steps:
|
||||||
|
|
||||||
|
* Step 1: Open a pull request. Anyone can do this.
|
||||||
|
|
||||||
|
* Step 2: Discuss the pull request. Anyone can do this.
|
||||||
|
|
||||||
|
* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
|
||||||
|
this (see below "Who decides what?")
|
||||||
|
|
||||||
|
### I'm a maintainer, should I make pull requests too?
|
||||||
|
|
||||||
|
Yes. Nobody should ever push to master directly. All changes should be
|
||||||
|
made through a pull request.
|
||||||
|
|
||||||
|
## Who decides what?
|
||||||
|
|
||||||
|
All decisions are pull requests, and the relevant maintainers make
|
||||||
|
decisions by accepting or refusing the pull request. Review and acceptance
|
||||||
|
by anyone is denoted by adding a comment in the pull request: `LGTM`.
|
||||||
|
However, only currently listed `MAINTAINERS` are counted towards the required
|
||||||
|
two LGTMs.
|
||||||
|
|
||||||
|
Overall the maintainer system works because of mutual respect across the
|
||||||
|
maintainers of the project. The maintainers trust one another to make decisions
|
||||||
|
in the best interests of the project. Sometimes maintainers can disagree and
|
||||||
|
this is part of a healthy project to represent the point of views of various people.
|
||||||
|
In the case where maintainers cannot find agreement on a specific change the
|
||||||
|
role of a Chief Maintainer comes into play.
|
||||||
|
|
||||||
|
The Chief Maintainer for the project is responsible for overall architecture
|
||||||
|
of the project to maintain conceptual integrity. Large decisions and
|
||||||
|
architecture changes should be reviewed by the chief maintainer.
|
||||||
|
The current chief maintainer for the project is Michael Crosby (@crosbymichael).
|
||||||
|
|
||||||
|
Even though the maintainer system is built on trust, if there is a conflict
|
||||||
|
with the chief maintainer on a decision, their decision can be challenged
|
||||||
|
and brought to the technical oversight board if two-thirds of the
|
||||||
|
maintainers vote for an appeal. It is expected that this would be a
|
||||||
|
very exceptional event.
|
||||||
|
|
||||||
|
|
||||||
|
### How are maintainers added?
|
||||||
|
|
||||||
|
The best maintainers have a vested interest in the project. Maintainers
|
||||||
|
are first and foremost contributors that have shown they are committed to
|
||||||
|
the long term success of the project. Contributors wanting to become
|
||||||
|
maintainers are expected to be deeply involved in contributing code,
|
||||||
|
pull request review, and triage of issues in the project for more than two months.
|
||||||
|
|
||||||
|
Just contributing does not make you a maintainer, it is about building trust
|
||||||
|
with the current maintainers of the project and being a person that they can
|
||||||
|
depend on and trust to make decisions in the best interest of the project. The
|
||||||
|
final vote to add a new maintainer should be approved by over 66% of the current
|
||||||
|
maintainers with the chief maintainer having veto power. In case of a veto,
|
||||||
|
conflict resolution rules expressed above apply. The voting period is
|
||||||
|
five business days on the Pull Request to add the new maintainer.
|
||||||
|
|
||||||
|
|
||||||
|
### What is expected of maintainers?
|
||||||
|
|
||||||
|
Part of a healthy project is to have active maintainers to support the community
|
||||||
|
in contributions and perform tasks to keep the project running. Maintainers are
|
||||||
|
expected to be able to respond in a timely manner if their help is required on specific
|
||||||
|
issues where they are pinged. Being a maintainer is a time consuming commitment and should
|
||||||
|
not be taken lightly.
|
||||||
|
|
||||||
|
When a maintainer is unable to perform the required duties they can be removed with
|
||||||
|
a vote by 66% of the current maintainers with the chief maintainer having veto power.
|
||||||
|
The voting period is ten business days. Issues related to a maintainer's performance should
|
||||||
|
be discussed with them among the other maintainers so that they are not surprised by
|
||||||
|
a pull request removing them.
|
||||||
|
|
||||||
|
|
||||||
|
|
40	vendor/src/github.com/opencontainers/runc/Makefile	vendored	Normal file

@@ -0,0 +1,40 @@
RUNC_TEST_IMAGE=runc_test
PROJECT=github.com/opencontainers/runc
TEST_DOCKERFILE=script/test_Dockerfile
BUILDTAGS=seccomp
export GOPATH:=$(CURDIR)/Godeps/_workspace:$(GOPATH)

all:
	go build -tags "$(BUILDTAGS)" -o runc .

static:
	CGO_ENABLED=1 go build -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static" -o runc .

vet:
	go get golang.org/x/tools/cmd/vet

lint: vet
	go vet ./...
	go fmt ./...

runctestimage:
	docker build -t $(RUNC_TEST_IMAGE) -f $(TEST_DOCKERFILE) .

test: runctestimage
	docker run -e TESTFLAGS --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_TEST_IMAGE) make localtest

localtest:
	go test -tags "$(BUILDTAGS)" ${TESTFLAGS} -v ./...


install:
	cp runc /usr/local/bin/runc

clean:
	rm runc

validate: vet
	script/validate-gofmt
	go vet ./...

ci: validate localtest
17	vendor/src/github.com/opencontainers/runc/NOTICE	vendored	Normal file

@@ -0,0 +1,17 @@
runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
19	vendor/src/github.com/opencontainers/runc/PRINCIPLES.md	vendored	Normal file

@@ -0,0 +1,19 @@
# runc principles

In the design and development of runc and libcontainer we try to follow these principles:

(Work in progress)

* Don't try to replace every tool. Instead, be an ingredient to improve them.
* Less code is better.
* Fewer components are better. Do you really need to add one more class?
* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code.
* When hesitating between two options, choose the one that is easier to reverse.
* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later.
* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
* The fewer moving parts in a container, the better.
* Don't merge it unless you document it.
* Don't document it unless you can keep it up-to-date.
* Don't merge it unless you test it!
* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
144	vendor/src/github.com/opencontainers/runc/README.md	vendored	Normal file

@@ -0,0 +1,144 @@
[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=runc Master)](https://jenkins.dockerproject.org/job/runc Master)

## runc

`runc` is a CLI tool for spawning and running containers according to the OCF specification.

## State of the project

`runc` is an implementation of the OCI specification. We are currently sprinting to have a v1 of the spec out, so the `runc` config format will keep changing until the spec is finalized. However, we encourage you to try out the tool and give feedback.

### OCF

How does `runc` integrate with the Open Container Initiative Specification?
`runc` depends on the types specified in the
[specs](https://github.com/opencontainers/specs) repository. Whenever the
specification is updated and ready to be versioned, `runc` will update its dependency
on the specs repository and support the updated spec.

### Building:

At the time of writing, runc only builds on the Linux platform.

```bash
# create a 'github.com/opencontainers' directory in your GOPATH/src
cd github.com/opencontainers
git clone https://github.com/opencontainers/runc
cd runc
make
sudo make install
```

In order to enable seccomp support you will need to install libseccomp on your platform.
If you do not wish to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.

#### Build Tags

`runc` supports optional build tags for compiling in support for various features.

| Build Tag | Feature                            | Dependency  |
|-----------|------------------------------------|-------------|
| seccomp   | Syscall filtering                  | libseccomp  |
| selinux   | selinux process and mount labeling | <none>      |
| apparmor  | apparmor profile support           | libapparmor |
### Testing:
|
||||||
|
|
||||||
|
You can run tests for runC by using command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# make test
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that test cases are run in Docker container, so you need to install
|
||||||
|
`docker` first. And test requires mounting cgroups inside container, it's
|
||||||
|
done by docker now, so you need a docker version newer than 1.8.0-rc2.
|
||||||
|
|
||||||
|
You can also run specific test cases by:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# make test TESTFLAGS="-run=SomeTestFunction"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using:
|
||||||
|
|
||||||
|
To run a container with the id "test", execute `runc start` with the containers id as arg one
|
||||||
|
in the bundle's root directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
runc start test
|
||||||
|
/ $ ps
|
||||||
|
PID USER COMMAND
|
||||||
|
1 daemon sh
|
||||||
|
5 daemon sh
|
||||||
|
/ $
|
||||||
|
```
|
||||||
|
|
||||||
|
### OCI Container JSON Format:
|
||||||
|
|
||||||
|
OCI container JSON format is based on OCI [specs](https://github.com/opencontainers/specs).
|
||||||
|
You can generate JSON files by using `runc spec`.
|
||||||
|
It assumes that the file-system is found in a directory called
|
||||||
|
`rootfs` and there is a user with uid and gid of `0` defined within that file-system.
|
||||||
|
|
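As a hedged sketch (not part of runc itself), a host tool could read the `process` section of a generated `config.json` using the vendored specs types; the wrapper struct and file handling here are assumptions for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/opencontainers/specs"
)

func main() {
	f, err := os.Open("config.json") // produced by `runc spec`
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Only the "process" object is pulled out here; the full spec has many
	// more fields and its shape is still changing while the spec is drafted.
	var cfg struct {
		Process specs.Process `json:"process"`
	}
	if err := json.NewDecoder(f).Decode(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("entrypoint: %v (terminal=%v)\n", cfg.Process.Args, cfg.Process.Terminal)
}
```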
### Examples:

#### Using a Docker image (requires version 1.3 or later)

To test using Docker's `busybox` image follow these steps:
* Install `docker` and download the `busybox` image: `docker pull busybox`
* Create a container from that image and export its contents to a tar file:
`docker export $(docker create busybox) > busybox.tar`
* Untar the contents to create your filesystem directory:
```
mkdir rootfs
tar -C rootfs -xf busybox.tar
```
* Create `config.json` by using `runc spec`.
* Execute `runc start` and you should be placed into a shell where you can run `ps`:
```
$ runc start test
/ # ps
PID   USER     COMMAND
    1 root     sh
    9 root     ps
```

#### Using runc with systemd

To use runc with systemd, you can create a unit file
`/usr/lib/systemd/system/minecraft.service` as below (edit the
Description, WorkingDirectory, or service name as you need).

```service
[Unit]
Description=Minecraft Build Server
Documentation=http://minecraft.net
After=network.target

[Service]
CPUQuota=200%
MemoryLimit=1536M
ExecStart=/usr/local/bin/runc start minecraft
Restart=on-failure
WorkingDirectory=/containers/minecraftbuild

[Install]
WantedBy=multi-user.target
```

Make sure you have the bundle's root directory and JSON configs in
your WorkingDirectory, then use systemd commands to start the service:

```bash
systemctl daemon-reload
systemctl start minecraft.service
```

Note that if you generate the JSON config with `runc spec`, you need to edit
`config.json` and set `process.terminal` to false so runc won't create a tty,
because we can't attach a terminal from stdin when running as a systemd service.
84
vendor/src/github.com/opencontainers/runc/checkpoint.go
vendored
Normal file
84
vendor/src/github.com/opencontainers/runc/checkpoint.go
vendored
Normal file
|
@ -0,0 +1,84 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/codegangsta/cli"
|
||||||
|
"github.com/opencontainers/runc/libcontainer"
|
||||||
|
)
|
||||||
|
|
||||||
|
var checkpointCommand = cli.Command{
|
||||||
|
Name: "checkpoint",
|
||||||
|
Usage: "checkpoint a running container",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
cli.StringFlag{Name: "image-path", Value: "", Usage: "path for saving criu image files"},
|
||||||
|
cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"},
|
||||||
|
cli.BoolFlag{Name: "leave-running", Usage: "leave the process running after checkpointing"},
|
||||||
|
cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"},
|
||||||
|
cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"},
|
||||||
|
cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"},
|
||||||
|
cli.StringFlag{Name: "page-server", Value: "", Usage: "ADDRESS:PORT of the page server"},
|
||||||
|
cli.BoolFlag{Name: "file-locks", Usage: "handle file locks, for safety"},
|
||||||
|
cli.StringFlag{Name: "manage-cgroups-mode", Value: "", Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'."},
|
||||||
|
},
|
||||||
|
Action: func(context *cli.Context) {
|
||||||
|
container, err := getContainer(context)
|
||||||
|
if err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
defer destroy(container)
|
||||||
|
options := criuOptions(context)
|
||||||
|
// these are the mandatory criu options for a container
|
||||||
|
setPageServer(context, options)
|
||||||
|
setManageCgroupsMode(context, options)
|
||||||
|
if err := container.Checkpoint(options); err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCheckpointImagePath(context *cli.Context) string {
|
||||||
|
imagePath := context.String("image-path")
|
||||||
|
if imagePath == "" {
|
||||||
|
imagePath = getDefaultImagePath(context)
|
||||||
|
}
|
||||||
|
return imagePath
|
||||||
|
}
|
||||||
|
|
||||||
|
func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) {
|
||||||
|
// xxx following criu opts are optional
|
||||||
|
// The dump image can be sent to a criu page server
|
||||||
|
if psOpt := context.String("page-server"); psOpt != "" {
|
||||||
|
addressPort := strings.Split(psOpt, ":")
|
||||||
|
if len(addressPort) != 2 {
|
||||||
|
fatal(fmt.Errorf("Use --page-server ADDRESS:PORT to specify page server"))
|
||||||
|
}
|
||||||
|
portInt, err := strconv.Atoi(addressPort[1])
|
||||||
|
if err != nil {
|
||||||
|
fatal(fmt.Errorf("Invalid port number"))
|
||||||
|
}
|
||||||
|
options.PageServer = libcontainer.CriuPageServerInfo{
|
||||||
|
Address: addressPort[0],
|
||||||
|
Port: int32(portInt),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts) {
|
||||||
|
if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" {
|
||||||
|
switch cgOpt {
|
||||||
|
case "soft":
|
||||||
|
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT
|
||||||
|
case "full":
|
||||||
|
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL
|
||||||
|
case "strict":
|
||||||
|
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT
|
||||||
|
default:
|
||||||
|
fatal(fmt.Errorf("Invalid manage cgroups mode"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
15	vendor/src/github.com/opencontainers/runc/delete.go	vendored	Normal file

@@ -0,0 +1,15 @@
package main

import "github.com/codegangsta/cli"

var deleteCommand = cli.Command{
	Name:  "delete",
	Usage: "delete any resources held by the container often used with detached containers",
	Action: func(context *cli.Context) {
		container, err := getContainer(context)
		if err != nil {
			fatal(err)
		}
		destroy(container)
	},
}
95
vendor/src/github.com/opencontainers/runc/events.go
vendored
Normal file
95
vendor/src/github.com/opencontainers/runc/events.go
vendored
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/codegangsta/cli"
|
||||||
|
"github.com/opencontainers/runc/libcontainer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// event struct for encoding the event data to json.
|
||||||
|
type event struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
Data interface{} `json:"data,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var eventsCommand = cli.Command{
|
||||||
|
Name: "events",
|
||||||
|
Usage: "display container events such as OOM notifications, cpu, memory, IO and network stats",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"},
|
||||||
|
cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"},
|
||||||
|
},
|
||||||
|
Action: func(context *cli.Context) {
|
||||||
|
container, err := getContainer(context)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
stats = make(chan *libcontainer.Stats, 1)
|
||||||
|
events = make(chan *event, 1024)
|
||||||
|
group = &sync.WaitGroup{}
|
||||||
|
)
|
||||||
|
group.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer group.Done()
|
||||||
|
enc := json.NewEncoder(os.Stdout)
|
||||||
|
for e := range events {
|
||||||
|
if err := enc.Encode(e); err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if context.Bool("stats") {
|
||||||
|
s, err := container.Stats()
|
||||||
|
if err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
events <- &event{Type: "stats", ID: container.ID(), Data: s}
|
||||||
|
close(events)
|
||||||
|
group.Wait()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
for range time.Tick(context.Duration("interval")) {
|
||||||
|
s, err := container.Stats()
|
||||||
|
if err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stats <- s
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
n, err := container.NotifyOOM()
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case _, ok := <-n:
|
||||||
|
if ok {
|
||||||
|
// this means an oom event was received, if it is !ok then
|
||||||
|
// the channel was closed because the container stopped and
|
||||||
|
// the cgroups no longer exist.
|
||||||
|
events <- &event{Type: "oom", ID: container.ID()}
|
||||||
|
} else {
|
||||||
|
n = nil
|
||||||
|
}
|
||||||
|
case s := <-stats:
|
||||||
|
events <- &event{Type: "stats", ID: container.ID(), Data: s}
|
||||||
|
}
|
||||||
|
if n == nil {
|
||||||
|
close(events)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
group.Wait()
|
||||||
|
},
|
||||||
|
}
|
140
vendor/src/github.com/opencontainers/runc/exec.go
vendored
Normal file
140
vendor/src/github.com/opencontainers/runc/exec.go
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/codegangsta/cli"
|
||||||
|
"github.com/opencontainers/specs"
|
||||||
|
)
|
||||||
|
|
||||||
|
var execCommand = cli.Command{
|
||||||
|
Name: "exec",
|
||||||
|
Usage: "execute new process inside the container",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "console",
|
||||||
|
Usage: "specify the pty slave path for use with the container",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "cwd",
|
||||||
|
Usage: "current working directory in the container",
|
||||||
|
},
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "env, e",
|
||||||
|
Usage: "set environment variables",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "tty, t",
|
||||||
|
Usage: "allocate a pseudo-TTY",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "user, u",
|
||||||
|
Usage: "UID (format: <uid>[:<gid>])",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "process,p",
|
||||||
|
Usage: "path to the process.json",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "detach,d",
|
||||||
|
Usage: "detach from the container's process",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "pid-file",
|
||||||
|
Value: "",
|
||||||
|
Usage: "specify the file to write the process id to",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(context *cli.Context) {
|
||||||
|
if os.Geteuid() != 0 {
|
||||||
|
logrus.Fatal("runc should be run as root")
|
||||||
|
}
|
||||||
|
status, err := execProcess(context)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatalf("exec failed: %v", err)
|
||||||
|
}
|
||||||
|
os.Exit(status)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func execProcess(context *cli.Context) (int, error) {
|
||||||
|
container, err := getContainer(context)
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
detach = context.Bool("detach")
|
||||||
|
rootfs = container.Config().Rootfs
|
||||||
|
)
|
||||||
|
|
||||||
|
p, err := getProcess(context, path.Dir(rootfs))
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return runProcess(container, p, nil, context.String("console"), context.String("pid-file"), detach)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
|
||||||
|
if path := context.String("process"); path != "" {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
var p specs.Process
|
||||||
|
if err := json.NewDecoder(f).Decode(&p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &p, nil
|
||||||
|
}
|
||||||
|
// process via cli flags
|
||||||
|
if err := os.Chdir(bundle); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
spec, err := loadSpec(specConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
p := spec.Process
|
||||||
|
p.Args = context.Args()[1:]
|
||||||
|
// override the cwd, if passed
|
||||||
|
if context.String("cwd") != "" {
|
||||||
|
p.Cwd = context.String("cwd")
|
||||||
|
}
|
||||||
|
// append the passed env variables
|
||||||
|
for _, e := range context.StringSlice("env") {
|
||||||
|
p.Env = append(p.Env, e)
|
||||||
|
}
|
||||||
|
// set the tty
|
||||||
|
if context.IsSet("tty") {
|
||||||
|
p.Terminal = context.Bool("tty")
|
||||||
|
}
|
||||||
|
// override the user, if passed
|
||||||
|
if context.String("user") != "" {
|
||||||
|
u := strings.SplitN(context.String("user"), ":", 2)
|
||||||
|
if len(u) > 1 {
|
||||||
|
gid, err := strconv.Atoi(u[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
|
||||||
|
}
|
||||||
|
p.User.GID = uint32(gid)
|
||||||
|
}
|
||||||
|
uid, err := strconv.Atoi(u[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
|
||||||
|
}
|
||||||
|
p.User.UID = uint32(uid)
|
||||||
|
}
|
||||||
|
return &p, nil
|
||||||
|
}
|
87
vendor/src/github.com/opencontainers/runc/kill.go
vendored
Normal file
87
vendor/src/github.com/opencontainers/runc/kill.go
vendored
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/codegangsta/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
var signalMap = map[string]syscall.Signal{
|
||||||
|
"ABRT": syscall.SIGABRT,
|
||||||
|
"ALRM": syscall.SIGALRM,
|
||||||
|
"BUS": syscall.SIGBUS,
|
||||||
|
"CHLD": syscall.SIGCHLD,
|
||||||
|
"CLD": syscall.SIGCLD,
|
||||||
|
"CONT": syscall.SIGCONT,
|
||||||
|
"FPE": syscall.SIGFPE,
|
||||||
|
"HUP": syscall.SIGHUP,
|
||||||
|
"ILL": syscall.SIGILL,
|
||||||
|
"INT": syscall.SIGINT,
|
||||||
|
"IO": syscall.SIGIO,
|
||||||
|
"IOT": syscall.SIGIOT,
|
||||||
|
"KILL": syscall.SIGKILL,
|
||||||
|
"PIPE": syscall.SIGPIPE,
|
||||||
|
"POLL": syscall.SIGPOLL,
|
||||||
|
"PROF": syscall.SIGPROF,
|
||||||
|
"PWR": syscall.SIGPWR,
|
||||||
|
"QUIT": syscall.SIGQUIT,
|
||||||
|
"SEGV": syscall.SIGSEGV,
|
||||||
|
"STKFLT": syscall.SIGSTKFLT,
|
||||||
|
"STOP": syscall.SIGSTOP,
|
||||||
|
"SYS": syscall.SIGSYS,
|
||||||
|
"TERM": syscall.SIGTERM,
|
||||||
|
"TRAP": syscall.SIGTRAP,
|
||||||
|
"TSTP": syscall.SIGTSTP,
|
||||||
|
"TTIN": syscall.SIGTTIN,
|
||||||
|
"TTOU": syscall.SIGTTOU,
|
||||||
|
"UNUSED": syscall.SIGUNUSED,
|
||||||
|
"URG": syscall.SIGURG,
|
||||||
|
"USR1": syscall.SIGUSR1,
|
||||||
|
"USR2": syscall.SIGUSR2,
|
||||||
|
"VTALRM": syscall.SIGVTALRM,
|
||||||
|
"WINCH": syscall.SIGWINCH,
|
||||||
|
"XCPU": syscall.SIGXCPU,
|
||||||
|
"XFSZ": syscall.SIGXFSZ,
|
||||||
|
}
|
||||||
|
|
||||||
|
var killCommand = cli.Command{
|
||||||
|
Name: "kill",
|
||||||
|
Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
|
||||||
|
Action: func(context *cli.Context) {
|
||||||
|
container, err := getContainer(context)
|
||||||
|
if err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sigstr := context.Args().Get(1)
|
||||||
|
if sigstr == "" {
|
||||||
|
sigstr = "SIGTERM"
|
||||||
|
}
|
||||||
|
|
||||||
|
signal, err := parseSignal(sigstr)
|
||||||
|
if err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := container.Signal(signal); err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSignal(rawSignal string) (syscall.Signal, error) {
|
||||||
|
s, err := strconv.Atoi(rawSignal)
|
||||||
|
if err == nil {
|
||||||
|
return syscall.Signal(s), nil
|
||||||
|
}
|
||||||
|
signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
|
||||||
|
if !ok {
|
||||||
|
return -1, fmt.Errorf("unknown signal %q", rawSignal)
|
||||||
|
}
|
||||||
|
return signal, nil
|
||||||
|
}
|
|
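As a quick illustration of the behaviour of the `parseSignal` helper added above (a sketch, not a test from the repository): numeric strings and signal names with or without the `SIG` prefix resolve to the same signal. The cut-down lookup here covers only two signals for brevity.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"syscall"
)

// resolve mirrors the parseSignal logic above for a reduced set of signals.
func resolve(raw string) (syscall.Signal, error) {
	if n, err := strconv.Atoi(raw); err == nil {
		return syscall.Signal(n), nil
	}
	switch strings.TrimPrefix(strings.ToUpper(raw), "SIG") {
	case "TERM":
		return syscall.SIGTERM, nil
	case "KILL":
		return syscall.SIGKILL, nil
	}
	return -1, fmt.Errorf("unknown signal %q", raw)
}

func main() {
	for _, s := range []string{"15", "TERM", "sigterm"} {
		sig, err := resolve(s)
		fmt.Println(s, "->", sig, err) // all three print SIGTERM, err == nil
	}
}
```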
@ -10,80 +10,165 @@ host system and which is (optionally) isolated from other containers in the syst
|
||||||
|
|
||||||
#### Using libcontainer
|
#### Using libcontainer
|
||||||
|
|
||||||
To create a container you first have to initialize an instance of a factory
|
Because containers are spawned in a two step process you will need a binary that
|
||||||
that will handle the creation and initialization for a container.
|
will be executed as the init process for the container. In libcontainer, we use
|
||||||
|
the current binary (/proc/self/exe) to be executed as the init process, and use
|
||||||
Because containers are spawned in a two step process you will need to provide
|
arg "init", we call the first step process "bootstrap", so you always need a "init"
|
||||||
arguments to a binary that will be executed as the init process for the container.
|
function as the entry of "bootstrap".
|
||||||
To use the current binary that is spawning the containers and acting as the parent
|
|
||||||
you can use `os.Args[0]` and we have a command called `init` setup.
|
|
||||||
|
|
||||||
```go
|
```go
|
||||||
root, err := libcontainer.New("/var/lib/container", libcontainer.InitArgs(os.Args[0], "init"))
|
func init() {
|
||||||
|
if len(os.Args) > 1 && os.Args[1] == "init" {
|
||||||
|
runtime.GOMAXPROCS(1)
|
||||||
|
runtime.LockOSThread()
|
||||||
|
factory, _ := libcontainer.New("")
|
||||||
|
if err := factory.StartInitialization(); err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
panic("--this line should have never been executed, congratulations--")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Then to create a container you first have to initialize an instance of a factory
|
||||||
|
that will handle the creation and initialization for a container.
|
||||||
|
|
||||||
|
```go
|
||||||
|
factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
logrus.Fatal(err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Once you have an instance of the factory created we can create a configuration
|
Once you have an instance of the factory created we can create a configuration
|
||||||
struct describing how the container is to be created. A sample would look similar to this:
|
struct describing how the container is to be created. A sample would look similar to this:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
|
defaultMountFlags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
|
||||||
config := &configs.Config{
|
config := &configs.Config{
|
||||||
Rootfs: rootfs,
|
Rootfs: "/your/path/to/rootfs",
|
||||||
Capabilities: []string{
|
Capabilities: []string{
|
||||||
"CAP_CHOWN",
|
"CAP_CHOWN",
|
||||||
"CAP_DAC_OVERRIDE",
|
"CAP_DAC_OVERRIDE",
|
||||||
"CAP_FSETID",
|
"CAP_FSETID",
|
||||||
"CAP_FOWNER",
|
"CAP_FOWNER",
|
||||||
"CAP_MKNOD",
|
"CAP_MKNOD",
|
||||||
"CAP_NET_RAW",
|
"CAP_NET_RAW",
|
||||||
"CAP_SETGID",
|
"CAP_SETGID",
|
||||||
"CAP_SETUID",
|
"CAP_SETUID",
|
||||||
"CAP_SETFCAP",
|
"CAP_SETFCAP",
|
||||||
"CAP_SETPCAP",
|
"CAP_SETPCAP",
|
||||||
"CAP_NET_BIND_SERVICE",
|
"CAP_NET_BIND_SERVICE",
|
||||||
"CAP_SYS_CHROOT",
|
"CAP_SYS_CHROOT",
|
||||||
"CAP_KILL",
|
"CAP_KILL",
|
||||||
"CAP_AUDIT_WRITE",
|
"CAP_AUDIT_WRITE",
|
||||||
},
|
},
|
||||||
Namespaces: configs.Namespaces([]configs.Namespace{
|
Namespaces: configs.Namespaces([]configs.Namespace{
|
||||||
{Type: configs.NEWNS},
|
{Type: configs.NEWNS},
|
||||||
{Type: configs.NEWUTS},
|
{Type: configs.NEWUTS},
|
||||||
{Type: configs.NEWIPC},
|
{Type: configs.NEWIPC},
|
||||||
{Type: configs.NEWPID},
|
{Type: configs.NEWPID},
|
||||||
{Type: configs.NEWNET},
|
{Type: configs.NEWUSER},
|
||||||
}),
|
{Type: configs.NEWNET},
|
||||||
Cgroups: &configs.Cgroup{
|
}),
|
||||||
Name: "test-container",
|
Cgroups: &configs.Cgroup{
|
||||||
Parent: "system",
|
Name: "test-container",
|
||||||
AllowAllDevices: false,
|
Parent: "system",
|
||||||
AllowedDevices: configs.DefaultAllowedDevices,
|
Resources: &configs.Resources{
|
||||||
},
|
MemorySwappiness: -1,
|
||||||
|
AllowAllDevices: false,
|
||||||
Devices: configs.DefaultAutoCreatedDevices,
|
AllowedDevices: configs.DefaultAllowedDevices,
|
||||||
Hostname: "testing",
|
},
|
||||||
Networks: []*configs.Network{
|
},
|
||||||
{
|
MaskPaths: []string{
|
||||||
Type: "loopback",
|
"/proc/kcore",
|
||||||
Address: "127.0.0.1/0",
|
},
|
||||||
Gateway: "localhost",
|
ReadonlyPaths: []string{
|
||||||
},
|
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
||||||
},
|
},
|
||||||
Rlimits: []configs.Rlimit{
|
Devices: configs.DefaultAutoCreatedDevices,
|
||||||
{
|
Hostname: "testing",
|
||||||
Type: syscall.RLIMIT_NOFILE,
|
Mounts: []*configs.Mount{
|
||||||
Hard: uint64(1024),
|
{
|
||||||
Soft: uint64(1024),
|
Source: "proc",
|
||||||
},
|
Destination: "/proc",
|
||||||
},
|
Device: "proc",
|
||||||
|
Flags: defaultMountFlags,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Source: "tmpfs",
|
||||||
|
Destination: "/dev",
|
||||||
|
Device: "tmpfs",
|
||||||
|
Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME,
|
||||||
|
Data: "mode=755",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Source: "devpts",
|
||||||
|
Destination: "/dev/pts",
|
||||||
|
Device: "devpts",
|
||||||
|
Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC,
|
||||||
|
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Device: "tmpfs",
|
||||||
|
Source: "shm",
|
||||||
|
Destination: "/dev/shm",
|
||||||
|
Data: "mode=1777,size=65536k",
|
||||||
|
Flags: defaultMountFlags,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Source: "mqueue",
|
||||||
|
Destination: "/dev/mqueue",
|
||||||
|
Device: "mqueue",
|
||||||
|
Flags: defaultMountFlags,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Source: "sysfs",
|
||||||
|
Destination: "/sys",
|
||||||
|
Device: "sysfs",
|
||||||
|
Flags: defaultMountFlags | syscall.MS_RDONLY,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
UidMappings: []configs.IDMap{
|
||||||
|
{
|
||||||
|
ContainerID: 0,
|
||||||
|
Host: 1000,
|
||||||
|
size: 65536,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
GidMappings: []configs.IDMap{
|
||||||
|
{
|
||||||
|
ContainerID: 0,
|
||||||
|
Host: 1000,
|
||||||
|
size: 65536,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Networks: []*configs.Network{
|
||||||
|
{
|
||||||
|
Type: "loopback",
|
||||||
|
Address: "127.0.0.1/0",
|
||||||
|
Gateway: "localhost",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Rlimits: []configs.Rlimit{
|
||||||
|
{
|
||||||
|
Type: syscall.RLIMIT_NOFILE,
|
||||||
|
Hard: uint64(1025),
|
||||||
|
Soft: uint64(1025),
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Once you have the configuration populated you can create a container:
|
Once you have the configuration populated you can create a container:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
container, err := root.Create("container-id", config)
|
container, err := factory.Create("container-id", config)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
To spawn bash as the initial process inside the container and have the
|
To spawn bash as the initial process inside the container and have the
|
||||||
|
@ -91,23 +176,25 @@ processes pid returned in order to wait, signal, or kill the process:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
process := &libcontainer.Process{
|
process := &libcontainer.Process{
|
||||||
Args: []string{"/bin/bash"},
|
Args: []string{"/bin/bash"},
|
||||||
Env: []string{"PATH=/bin"},
|
Env: []string{"PATH=/bin"},
|
||||||
User: "daemon",
|
User: "daemon",
|
||||||
Stdin: os.Stdin,
|
Stdin: os.Stdin,
|
||||||
Stdout: os.Stdout,
|
Stdout: os.Stdout,
|
||||||
Stderr: os.Stderr,
|
Stderr: os.Stderr,
|
||||||
}
|
}
|
||||||
|
|
||||||
err := container.Start(process)
|
err := container.Start(process)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
logrus.Fatal(err)
|
||||||
|
container.Destroy()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for the process to finish.
|
// wait for the process to finish.
|
||||||
status, err := process.Wait()
|
_, err := process.Wait()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
logrus.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// destroy the container.
|
// destroy the container.
|
||||||
|
@ -124,7 +211,6 @@ processes, err := container.Processes()
|
||||||
// it's processes.
|
// it's processes.
|
||||||
stats, err := container.Stats()
|
stats, err := container.Stats()
|
||||||
|
|
||||||
|
|
||||||
// pause all processes inside the container.
|
// pause all processes inside the container.
|
||||||
container.Pause()
|
container.Pause()
|
||||||
|
|
||||||
|
|
@@ -142,6 +142,7 @@ system resources like cpu, memory, and device access.
 | perf_event | 1 |
 | freezer    | 1 |
 | hugetlb    | 1 |
+| pids       | 1 |

 All cgroup subsystem are joined so that statistics can be collected from
@@ -199,7 +200,7 @@ provide a good default for security and flexibility for the applications.
 | CAP_SYS_BOOT      | 0 |
 | CAP_LEASE         | 0 |
 | CAP_WAKE_ALARM    | 0 |
-| CAP_BLOCK_SUSPE   | 0 |
+| CAP_BLOCK_SUSPEND | 0 |

 Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
@@ -15,6 +15,9 @@ type Manager interface {
 	// Returns the PIDs inside the cgroup set
 	GetPids() ([]int, error)

+	// Returns the PIDs inside the cgroup set & all sub-cgroups
+	GetAllPids() ([]int, error)
+
 	// Returns statistics for the cgroup set
 	GetStats() (*Stats, error)
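A hedged sketch of what the new `GetAllPids` enables for a caller that wants to signal every process in a cgroup hierarchy. The package name and helper are hypothetical; only the `cgroups.Manager` interface comes from the hunk above.

```go
// Package cgutil is a hypothetical helper package for this illustration.
package cgutil

import (
	"log"
	"syscall"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

// KillCgroup sends sig to every pid in the manager's cgroup, including
// processes living in nested sub-cgroups, which GetPids alone would miss.
func KillCgroup(m cgroups.Manager, sig syscall.Signal) error {
	pids, err := m.GetAllPids()
	if err != nil {
		return err
	}
	for _, pid := range pids {
		// ESRCH just means the process already exited between listing and kill.
		if err := syscall.Kill(pid, sig); err != nil && err != syscall.ESRCH {
			log.Printf("kill %d: %v", pid, err)
		}
	}
	return nil
}
```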
18	vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_test.go	vendored	Normal file

@@ -0,0 +1,18 @@
// +build linux

package cgroups

import (
	"testing"
)

func TestParseCgroups(t *testing.T) {
	cgroups, err := ParseCgroupFile("/proc/self/cgroup")
	if err != nil {
		t.Fatal(err)
	}

	if _, ok := cgroups["cpu"]; !ok {
		t.Fail()
	}
}
|
@ -23,6 +23,7 @@ var (
|
||||||
&MemoryGroup{},
|
&MemoryGroup{},
|
||||||
&CpuGroup{},
|
&CpuGroup{},
|
||||||
&CpuacctGroup{},
|
&CpuacctGroup{},
|
||||||
|
&PidsGroup{},
|
||||||
&BlkioGroup{},
|
&BlkioGroup{},
|
||||||
&HugetlbGroup{},
|
&HugetlbGroup{},
|
||||||
&NetClsGroup{},
|
&NetClsGroup{},
|
||||||
|
@ -93,11 +94,10 @@ func getCgroupRoot() (string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type cgroupData struct {
|
type cgroupData struct {
|
||||||
root string
|
root string
|
||||||
parent string
|
innerPath string
|
||||||
name string
|
config *configs.Cgroup
|
||||||
config *configs.Cgroup
|
pid int
|
||||||
pid int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) Apply(pid int) (err error) {
|
func (m *Manager) Apply(pid int) (err error) {
|
||||||
|
@ -112,6 +112,22 @@ func (m *Manager) Apply(pid int) (err error) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.Paths != nil {
|
||||||
|
paths := make(map[string]string)
|
||||||
|
for name, path := range c.Paths {
|
||||||
|
_, err := d.path(name)
|
||||||
|
if err != nil {
|
||||||
|
if cgroups.IsNotFound(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
paths[name] = path
|
||||||
|
}
|
||||||
|
m.Paths = paths
|
||||||
|
return cgroups.EnterPid(m.Paths, pid)
|
||||||
|
}
|
||||||
|
|
||||||
paths := make(map[string]string)
|
paths := make(map[string]string)
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -135,17 +151,13 @@ func (m *Manager) Apply(pid int) (err error) {
|
||||||
paths[sys.Name()] = p
|
paths[sys.Name()] = p
|
||||||
}
|
}
|
||||||
m.Paths = paths
|
m.Paths = paths
|
||||||
|
|
||||||
if paths["cpu"] != "" {
|
|
||||||
if err := CheckCpushares(paths["cpu"], c.Resources.CpuShares); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) Destroy() error {
|
func (m *Manager) Destroy() error {
|
||||||
|
if m.Cgroups.Paths != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
if err := cgroups.RemovePaths(m.Paths); err != nil {
|
if err := cgroups.RemovePaths(m.Paths); err != nil {
|
||||||
|
@ -179,15 +191,28 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) Set(container *configs.Config) error {
|
func (m *Manager) Set(container *configs.Config) error {
|
||||||
for name, path := range m.Paths {
|
for _, sys := range subsystems {
|
||||||
sys, err := subsystems.Get(name)
|
// Generate fake cgroup data.
|
||||||
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
|
d, err := getCgroupData(container.Cgroups, -1)
|
||||||
continue
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
// Get the path, but don't error out if the cgroup wasn't found.
|
||||||
|
path, err := d.path(sys.Name())
|
||||||
|
if err != nil && !cgroups.IsNotFound(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if err := sys.Set(path, container.Cgroups); err != nil {
|
if err := sys.Set(path, container.Cgroups); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if m.Paths["cpu"] != "" {
|
||||||
|
if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -217,31 +242,50 @@ func (m *Manager) Freeze(state configs.FreezerState) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) GetPids() ([]int, error) {
|
func (m *Manager) GetPids() ([]int, error) {
|
||||||
d, err := getCgroupData(m.Cgroups, 0)
|
dir, err := getCgroupPath(m.Cgroups)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dir, err := d.path("devices")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cgroups.GetPids(dir)
|
return cgroups.GetPids(dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *Manager) GetAllPids() ([]int, error) {
|
||||||
|
dir, err := getCgroupPath(m.Cgroups)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cgroups.GetAllPids(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCgroupPath(c *configs.Cgroup) (string, error) {
|
||||||
|
d, err := getCgroupData(c, 0)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return d.path("devices")
|
||||||
|
}
|
||||||
|
|
||||||
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
|
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
|
||||||
root, err := getCgroupRoot()
|
root, err := getCgroupRoot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (c.Name != "" || c.Parent != "") && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
|
||||||
|
}
|
||||||
|
|
||||||
|
innerPath := c.Path
|
||||||
|
if innerPath == "" {
|
||||||
|
innerPath = filepath.Join(c.Parent, c.Name)
|
||||||
|
}
|
||||||
|
|
||||||
return &cgroupData{
|
return &cgroupData{
|
||||||
root: root,
|
root: root,
|
||||||
parent: c.Parent,
|
innerPath: c.Path,
|
||||||
name: c.Name,
|
config: c,
|
||||||
config: c,
|
pid: pid,
|
||||||
pid: pid,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -269,11 +313,10 @@ func (raw *cgroupData) path(subsystem string) (string, error) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
cgPath := filepath.Join(raw.parent, raw.name)
|
|
||||||
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
|
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
|
||||||
if filepath.IsAbs(cgPath) {
|
if filepath.IsAbs(raw.innerPath) {
|
||||||
// Sometimes subsystems can be mounted togethger as 'cpu,cpuacct'.
|
// Sometimes subsystems can be mounted togethger as 'cpu,cpuacct'.
|
||||||
return filepath.Join(raw.root, filepath.Base(mnt), cgPath), nil
|
return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
parentPath, err := raw.parentPath(subsystem, mnt, root)
|
parentPath, err := raw.parentPath(subsystem, mnt, root)
|
||||||
|
@ -281,7 +324,7 @@ func (raw *cgroupData) path(subsystem string) (string, error) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
return filepath.Join(parentPath, cgPath), nil
|
return filepath.Join(parentPath, raw.innerPath), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (raw *cgroupData) join(subsystem string) (string, error) {
|
func (raw *cgroupData) join(subsystem string) (string, error) {
|
||||||
|
|
|
@@ -22,15 +22,10 @@ func (s *BlkioGroup) Name() string {
 }

 func (s *BlkioGroup) Apply(d *cgroupData) error {
-	dir, err := d.join("blkio")
+	_, err := d.join("blkio")
 	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
-
-	if err := s.Set(dir, d.config); err != nil {
-		return err
-	}
-
 	return nil
 }
636
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio_test.go
vendored
Normal file
636
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio_test.go
vendored
Normal file
|
@ -0,0 +1,636 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||||
|
"github.com/opencontainers/runc/libcontainer/configs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sectorsRecursiveContents = `8:0 1024`
|
||||||
|
serviceBytesRecursiveContents = `8:0 Read 100
|
||||||
|
8:0 Write 200
|
||||||
|
8:0 Sync 300
|
||||||
|
8:0 Async 500
|
||||||
|
8:0 Total 500
|
||||||
|
Total 500`
|
||||||
|
servicedRecursiveContents = `8:0 Read 10
|
||||||
|
8:0 Write 40
|
||||||
|
8:0 Sync 20
|
||||||
|
8:0 Async 30
|
||||||
|
8:0 Total 50
|
||||||
|
Total 50`
|
||||||
|
queuedRecursiveContents = `8:0 Read 1
|
||||||
|
8:0 Write 4
|
||||||
|
8:0 Sync 2
|
||||||
|
8:0 Async 3
|
||||||
|
8:0 Total 5
|
||||||
|
Total 5`
|
||||||
|
serviceTimeRecursiveContents = `8:0 Read 173959
|
||||||
|
8:0 Write 0
|
||||||
|
8:0 Sync 0
|
||||||
|
8:0 Async 173959
|
||||||
|
8:0 Total 17395
|
||||||
|
Total 17395`
|
||||||
|
waitTimeRecursiveContents = `8:0 Read 15571
|
||||||
|
8:0 Write 0
|
||||||
|
8:0 Sync 0
|
||||||
|
8:0 Async 15571
|
||||||
|
8:0 Total 15571`
|
||||||
|
mergedRecursiveContents = `8:0 Read 5
|
||||||
|
8:0 Write 10
|
||||||
|
8:0 Sync 0
|
||||||
|
8:0 Async 0
|
||||||
|
8:0 Total 15
|
||||||
|
Total 15`
|
||||||
|
timeRecursiveContents = `8:0 8`
|
||||||
|
throttleServiceBytes = `8:0 Read 11030528
|
||||||
|
8:0 Write 23
|
||||||
|
8:0 Sync 42
|
||||||
|
8:0 Async 11030528
|
||||||
|
8:0 Total 11030528
|
||||||
|
252:0 Read 11030528
|
||||||
|
252:0 Write 23
|
||||||
|
252:0 Sync 42
|
||||||
|
252:0 Async 11030528
|
||||||
|
252:0 Total 11030528
|
||||||
|
Total 22061056`
|
||||||
|
throttleServiced = `8:0 Read 164
|
||||||
|
8:0 Write 23
|
||||||
|
8:0 Sync 42
|
||||||
|
8:0 Async 164
|
||||||
|
8:0 Total 164
|
||||||
|
252:0 Read 164
|
||||||
|
252:0 Write 23
|
||||||
|
252:0 Sync 42
|
||||||
|
252:0 Async 164
|
||||||
|
252:0 Total 164
|
||||||
|
Total 328`
|
||||||
|
)
|
||||||
|
|
||||||
|
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
|
||||||
|
*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioSetWeight(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
weightBefore = 100
|
||||||
|
weightAfter = 200
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.weight": strconv.Itoa(weightBefore),
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioWeight = weightAfter
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamUint(helper.CgroupPath, "blkio.weight")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.weight - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != weightAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.weight failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioSetWeightDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
weightDeviceBefore = "8:0 400"
|
||||||
|
)
|
||||||
|
|
||||||
|
wd := configs.NewWeightDevice(8, 0, 500, 0)
|
||||||
|
weightDeviceAfter := wd.WeightString()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.weight_device": weightDeviceBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != weightDeviceAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// regression #274
|
||||||
|
func TestBlkioSetMultipleWeightDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
weightDeviceBefore = "8:0 400"
|
||||||
|
)
|
||||||
|
|
||||||
|
wd1 := configs.NewWeightDevice(8, 0, 500, 0)
|
||||||
|
wd2 := configs.NewWeightDevice(8, 16, 500, 0)
|
||||||
|
// we cannot actually set and check both because normal ioutil.WriteFile
|
||||||
|
// when writing to cgroup file will overwrite the whole file content instead
|
||||||
|
// of updating it as the kernel is doing. Just check the second device
|
||||||
|
// is present will suffice for the test to ensure multiple writes are done.
|
||||||
|
weightDeviceAfter := wd2.WeightString()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.weight_device": weightDeviceBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd1, wd2}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != weightDeviceAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStats(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify expected stats.
|
||||||
|
expectedStats := cgroups.BlkioStats{}
|
||||||
|
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "")
|
||||||
|
|
||||||
|
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoSectorsFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoServicedFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoQueuedFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoServiceTimeFile(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoWaitTimeFile(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoMergedFile(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsNoTimeFile(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed unexpectedly: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected to fail, but did not")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": "8:0 Read Write",
|
||||||
|
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||||
|
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||||
|
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||||
|
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||||
|
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||||
|
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||||
|
"blkio.time_recursive": timeRecursiveContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected to fail, but did not")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonCFQBlkioStats(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.io_service_bytes_recursive": "",
|
||||||
|
"blkio.io_serviced_recursive": "",
|
||||||
|
"blkio.io_queued_recursive": "",
|
||||||
|
"blkio.sectors_recursive": "",
|
||||||
|
"blkio.io_service_time_recursive": "",
|
||||||
|
"blkio.io_wait_time_recursive": "",
|
||||||
|
"blkio.io_merged_recursive": "",
|
||||||
|
"blkio.time_recursive": "",
|
||||||
|
"blkio.throttle.io_service_bytes": throttleServiceBytes,
|
||||||
|
"blkio.throttle.io_serviced": throttleServiced,
|
||||||
|
})
|
||||||
|
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify expected stats.
|
||||||
|
expectedStats := cgroups.BlkioStats{}
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")
|
||||||
|
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
|
||||||
|
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")
|
||||||
|
|
||||||
|
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
throttleBefore = `8:0 1024`
|
||||||
|
)
|
||||||
|
|
||||||
|
td := configs.NewThrottleDevice(8, 0, 2048)
|
||||||
|
throttleAfter := td.String()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.throttle.read_bps_device": throttleBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioThrottleReadBpsDevice = []*configs.ThrottleDevice{td}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != throttleAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
throttleBefore = `8:0 1024`
|
||||||
|
)
|
||||||
|
|
||||||
|
td := configs.NewThrottleDevice(8, 0, 2048)
|
||||||
|
throttleAfter := td.String()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.throttle.write_bps_device": throttleBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioThrottleWriteBpsDevice = []*configs.ThrottleDevice{td}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != throttleAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
throttleBefore = `8:0 1024`
|
||||||
|
)
|
||||||
|
|
||||||
|
td := configs.NewThrottleDevice(8, 0, 2048)
|
||||||
|
throttleAfter := td.String()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.throttle.read_iops_device": throttleBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioThrottleReadIOPSDevice = []*configs.ThrottleDevice{td}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != throttleAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("blkio", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
throttleBefore = `8:0 1024`
|
||||||
|
)
|
||||||
|
|
||||||
|
td := configs.NewThrottleDevice(8, 0, 2048)
|
||||||
|
throttleAfter := td.String()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"blkio.throttle.write_iops_device": throttleBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.BlkioThrottleWriteIOPSDevice = []*configs.ThrottleDevice{td}
|
||||||
|
blkio := &BlkioGroup{}
|
||||||
|
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != throttleAfter {
|
||||||
|
t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
|
||||||
|
}
|
||||||
|
}
|
|
@ -22,15 +22,10 @@ func (s *CpuGroup) Name() string {
|
||||||
func (s *CpuGroup) Apply(d *cgroupData) error {
|
func (s *CpuGroup) Apply(d *cgroupData) error {
|
||||||
// We always want to join the cpu group, to allow fair cpu scheduling
|
// We always want to join the cpu group, to allow fair cpu scheduling
|
||||||
// on a container basis
|
// on a container basis
|
||||||
dir, err := d.join("cpu")
|
_, err := d.join("cpu")
|
||||||
if err != nil && !cgroups.IsNotFound(err) {
|
if err != nil && !cgroups.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Set(dir, d.config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
163
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_test.go
vendored
Normal file
163
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_test.go
vendored
Normal file
|
@ -0,0 +1,163 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCpuSetShares(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpu", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
sharesBefore = 1024
|
||||||
|
sharesAfter = 512
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpu.shares": strconv.Itoa(sharesBefore),
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.CpuShares = sharesAfter
|
||||||
|
cpu := &CpuGroup{}
|
||||||
|
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamUint(helper.CgroupPath, "cpu.shares")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpu.shares - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != sharesAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpu.shares failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCpuSetBandWidth(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpu", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
quotaBefore = 8000
|
||||||
|
quotaAfter = 5000
|
||||||
|
periodBefore = 10000
|
||||||
|
periodAfter = 7000
|
||||||
|
rtRuntimeBefore = 8000
|
||||||
|
rtRuntimeAfter = 5000
|
||||||
|
rtPeriodBefore = 10000
|
||||||
|
rtPeriodAfter = 7000
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpu.cfs_quota_us": strconv.Itoa(quotaBefore),
|
||||||
|
"cpu.cfs_period_us": strconv.Itoa(periodBefore),
|
||||||
|
"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
|
||||||
|
"cpu.rt_period_us": strconv.Itoa(rtPeriodBefore),
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.CpuQuota = quotaAfter
|
||||||
|
helper.CgroupData.config.Resources.CpuPeriod = periodAfter
|
||||||
|
helper.CgroupData.config.Resources.CpuRtRuntime = rtRuntimeAfter
|
||||||
|
helper.CgroupData.config.Resources.CpuRtPeriod = rtPeriodAfter
|
||||||
|
cpu := &CpuGroup{}
|
||||||
|
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
quota, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_quota_us")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpu.cfs_quota_us - %s", err)
|
||||||
|
}
|
||||||
|
if quota != quotaAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpu.cfs_quota_us failed.")
|
||||||
|
}
|
||||||
|
|
||||||
|
period, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_period_us")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpu.cfs_period_us - %s", err)
|
||||||
|
}
|
||||||
|
if period != periodAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpu.cfs_period_us failed.")
|
||||||
|
}
|
||||||
|
rtRuntime, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_runtime_us")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpu.rt_runtime_us - %s", err)
|
||||||
|
}
|
||||||
|
if rtRuntime != rtRuntimeAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpu.rt_runtime_us failed.")
|
||||||
|
}
|
||||||
|
rtPeriod, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_period_us")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpu.rt_period_us - %s", err)
|
||||||
|
}
|
||||||
|
if rtPeriod != rtPeriodAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpu.rt_period_us failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCpuStats(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpu", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
kNrPeriods = 2000
|
||||||
|
kNrThrottled = 200
|
||||||
|
kThrottledTime = uint64(18446744073709551615)
|
||||||
|
)
|
||||||
|
|
||||||
|
cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
|
||||||
|
kNrPeriods, kNrThrottled, kThrottledTime)
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpu.stat": cpuStatContent,
|
||||||
|
})
|
||||||
|
|
||||||
|
cpu := &CpuGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedStats := cgroups.ThrottlingData{
|
||||||
|
Periods: kNrPeriods,
|
||||||
|
ThrottledPeriods: kNrThrottled,
|
||||||
|
ThrottledTime: kThrottledTime}
|
||||||
|
|
||||||
|
expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNoCpuStatFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpu", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
cpu := &CpuGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("Expected not to fail, but did")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInvalidCpuStat(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpu", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
cpuStatContent := `nr_periods 2000
|
||||||
|
nr_throttled 200
|
||||||
|
throttled_time fortytwo`
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpu.stat": cpuStatContent,
|
||||||
|
})
|
||||||
|
|
||||||
|
cpu := &CpuGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected failed stat parsing.")
|
||||||
|
}
|
||||||
|
}
|
|
@ -4,6 +4,7 @@ package fs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
@ -11,6 +12,7 @@ import (
|
||||||
|
|
||||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||||
"github.com/opencontainers/runc/libcontainer/configs"
|
"github.com/opencontainers/runc/libcontainer/configs"
|
||||||
|
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CpusetGroup struct {
|
type CpusetGroup struct {
|
||||||
|
@ -63,11 +65,6 @@ func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) erro
|
||||||
if err := s.ensureParent(dir, root); err != nil {
|
if err := s.ensureParent(dir, root); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// the default values inherit from parent cgroup are already set in
|
|
||||||
// s.ensureParent, cover these if we have our own
|
|
||||||
if err := s.Set(dir, cgroup); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// because we are not using d.join we need to place the pid into the procs file
|
// because we are not using d.join we need to place the pid into the procs file
|
||||||
// unlike the other subsystems
|
// unlike the other subsystems
|
||||||
if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
|
if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
|
||||||
|
@ -92,9 +89,13 @@ func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []b
|
||||||
// it's parent.
|
// it's parent.
|
||||||
func (s *CpusetGroup) ensureParent(current, root string) error {
|
func (s *CpusetGroup) ensureParent(current, root string) error {
|
||||||
parent := filepath.Dir(current)
|
parent := filepath.Dir(current)
|
||||||
if filepath.Clean(parent) == root {
|
if libcontainerUtils.CleanPath(parent) == root {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
// Avoid infinite recursion.
|
||||||
|
if parent == current {
|
||||||
|
return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
|
||||||
|
}
|
||||||
if err := s.ensureParent(parent, root); err != nil {
|
if err := s.ensureParent(parent, root); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
65
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_test.go
vendored
Normal file
65
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_test.go
vendored
Normal file
|
@ -0,0 +1,65 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCpusetSetCpus(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpuset", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
cpusBefore = "0"
|
||||||
|
cpusAfter = "1-3"
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpuset.cpus": cpusBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.CpusetCpus = cpusAfter
|
||||||
|
cpuset := &CpusetGroup{}
|
||||||
|
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.cpus")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpuset.cpus - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != cpusAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpuset.cpus failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCpusetSetMems(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("cpuset", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
memsBefore = "0"
|
||||||
|
memsAfter = "1"
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"cpuset.mems": memsBefore,
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.CpusetMems = memsAfter
|
||||||
|
cpuset := &CpusetGroup{}
|
||||||
|
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.mems")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse cpuset.mems - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != memsAfter {
|
||||||
|
t.Fatal("Got the wrong value, set cpuset.mems failed.")
|
||||||
|
}
|
||||||
|
}
|
|
@ -15,21 +15,29 @@ func (s *DevicesGroup) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DevicesGroup) Apply(d *cgroupData) error {
|
func (s *DevicesGroup) Apply(d *cgroupData) error {
|
||||||
dir, err := d.join("devices")
|
_, err := d.join("devices")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// We will return error even it's `not found` error, devices
|
// We will return error even it's `not found` error, devices
|
||||||
// cgroup is hard requirement for container's security.
|
// cgroup is hard requirement for container's security.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Set(dir, d.config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
|
func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||||
|
devices := cgroup.Resources.Devices
|
||||||
|
if len(devices) > 0 {
|
||||||
|
for _, dev := range devices {
|
||||||
|
file := "devices.deny"
|
||||||
|
if dev.Allow {
|
||||||
|
file = "devices.allow"
|
||||||
|
}
|
||||||
|
if err := writeFile(path, file, dev.CgroupString()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
if !cgroup.Resources.AllowAllDevices {
|
if !cgroup.Resources.AllowAllDevices {
|
||||||
if err := writeFile(path, "devices.deny", "a"); err != nil {
|
if err := writeFile(path, "devices.deny", "a"); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
84
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices_test.go
vendored
Normal file
84
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices_test.go
vendored
Normal file
|
@ -0,0 +1,84 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runc/libcontainer/configs"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
allowedDevices = []*configs.Device{
|
||||||
|
{
|
||||||
|
Path: "/dev/zero",
|
||||||
|
Type: 'c',
|
||||||
|
Major: 1,
|
||||||
|
Minor: 5,
|
||||||
|
Permissions: "rwm",
|
||||||
|
FileMode: 0666,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
allowedList = "c 1:5 rwm"
|
||||||
|
deniedDevices = []*configs.Device{
|
||||||
|
{
|
||||||
|
Path: "/dev/null",
|
||||||
|
Type: 'c',
|
||||||
|
Major: 1,
|
||||||
|
Minor: 3,
|
||||||
|
Permissions: "rwm",
|
||||||
|
FileMode: 0666,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
deniedList = "c 1:3 rwm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDevicesSetAllow(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("devices", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"devices.deny": "a",
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.AllowAllDevices = false
|
||||||
|
helper.CgroupData.config.Resources.AllowedDevices = allowedDevices
|
||||||
|
devices := &DevicesGroup{}
|
||||||
|
if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "devices.allow")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse devices.allow - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != allowedList {
|
||||||
|
t.Fatal("Got the wrong value, set devices.allow failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDevicesSetDeny(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("devices", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"devices.allow": "a",
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.AllowAllDevices = true
|
||||||
|
helper.CgroupData.config.Resources.DeniedDevices = deniedDevices
|
||||||
|
devices := &DevicesGroup{}
|
||||||
|
if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse devices.deny - %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value != deniedList {
|
||||||
|
t.Fatal("Got the wrong value, set devices.deny failed.")
|
||||||
|
}
|
||||||
|
}
|
|
@ -19,15 +19,10 @@ func (s *FreezerGroup) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FreezerGroup) Apply(d *cgroupData) error {
|
func (s *FreezerGroup) Apply(d *cgroupData) error {
|
||||||
dir, err := d.join("freezer")
|
_, err := d.join("freezer")
|
||||||
if err != nil && !cgroups.IsNotFound(err) {
|
if err != nil && !cgroups.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Set(dir, d.config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
47
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_test.go
vendored
Normal file
47
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_test.go
vendored
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runc/libcontainer/configs"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFreezerSetState(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("freezer", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
"freezer.state": string(configs.Frozen),
|
||||||
|
})
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.Freezer = configs.Thawed
|
||||||
|
freezer := &FreezerGroup{}
|
||||||
|
if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := getCgroupParamString(helper.CgroupPath, "freezer.state")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse freezer.state - %s", err)
|
||||||
|
}
|
||||||
|
if value != string(configs.Thawed) {
|
||||||
|
t.Fatal("Got the wrong value, set freezer.state failed.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFreezerSetInvalidState(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("freezer", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
invalidArg configs.FreezerState = "Invalid"
|
||||||
|
)
|
||||||
|
|
||||||
|
helper.CgroupData.config.Resources.Freezer = invalidArg
|
||||||
|
freezer := &FreezerGroup{}
|
||||||
|
if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err == nil {
|
||||||
|
t.Fatal("Failed to return invalid argument error")
|
||||||
|
}
|
||||||
|
}
|
|
@ -19,15 +19,10 @@ func (s *HugetlbGroup) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *HugetlbGroup) Apply(d *cgroupData) error {
|
func (s *HugetlbGroup) Apply(d *cgroupData) error {
|
||||||
dir, err := d.join("hugetlb")
|
_, err := d.join("hugetlb")
|
||||||
if err != nil && !cgroups.IsNotFound(err) {
|
if err != nil && !cgroups.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Set(dir, d.config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
154
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb_test.go
vendored
Normal file
154
vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb_test.go
vendored
Normal file
|
@ -0,0 +1,154 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||||
|
"github.com/opencontainers/runc/libcontainer/configs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
hugetlbUsageContents = "128\n"
|
||||||
|
hugetlbMaxUsageContents = "256\n"
|
||||||
|
hugetlbFailcnt = "100\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
usage = "hugetlb.%s.usage_in_bytes"
|
||||||
|
limit = "hugetlb.%s.limit_in_bytes"
|
||||||
|
maxUsage = "hugetlb.%s.max_usage_in_bytes"
|
||||||
|
failcnt = "hugetlb.%s.failcnt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHugetlbSetHugetlb(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
|
||||||
|
const (
|
||||||
|
hugetlbBefore = 256
|
||||||
|
hugetlbAfter = 512
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
fmt.Sprintf(limit, pageSize): strconv.Itoa(hugetlbBefore),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
helper.CgroupData.config.Resources.HugetlbLimit = []*configs.HugepageLimit{
|
||||||
|
{
|
||||||
|
Pagesize: pageSize,
|
||||||
|
Limit: hugetlbAfter,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
if err := hugetlb.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
limit := fmt.Sprintf(limit, pageSize)
|
||||||
|
value, err := getCgroupParamUint(helper.CgroupPath, limit)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse %s - %s", limit, err)
|
||||||
|
}
|
||||||
|
if value != hugetlbAfter {
|
||||||
|
t.Fatalf("Set hugetlb.limit_in_bytes failed. Expected: %v, Got: %v", hugetlbAfter, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHugetlbStats(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
|
||||||
|
fmt.Sprintf(maxUsage, pageSize): hugetlbMaxUsageContents,
|
||||||
|
fmt.Sprintf(failcnt, pageSize): hugetlbFailcnt,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
expectedStats := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
expectHugetlbStatEquals(t, expectedStats, actualStats.HugetlbStats[pageSize])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHugetlbStatsNoUsageFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
maxUsage: hugetlbMaxUsageContents,
|
||||||
|
})
|
||||||
|
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected failure")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHugetlbStatsNoMaxUsageFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected failure")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHugetlbStatsBadUsageFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
for _, pageSize := range HugePageSizes {
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
fmt.Sprintf(usage, pageSize): "bad",
|
||||||
|
maxUsage: hugetlbMaxUsageContents,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected failure")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHugetlbStatsBadMaxUsageFile(t *testing.T) {
|
||||||
|
helper := NewCgroupTestUtil("hugetlb", t)
|
||||||
|
defer helper.cleanup()
|
||||||
|
helper.writeFileContents(map[string]string{
|
||||||
|
usage: hugetlbUsageContents,
|
||||||
|
maxUsage: "bad",
|
||||||
|
})
|
||||||
|
|
||||||
|
hugetlb := &HugetlbGroup{}
|
||||||
|
actualStats := *cgroups.NewStats()
|
||||||
|
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected failure")
|
||||||
|
}
|
||||||
|
}
|
|
@ -32,8 +32,9 @@ func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// We have to set kernel memory here, as we can't change it once
|
||||||
if err := s.Set(path, d.config); err != nil {
|
// processes have been attached.
|
||||||
|
if err := s.SetKernelMemory(path, d.config); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -50,7 +51,17 @@ func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
|
||||||
if err != nil && !cgroups.IsNotFound(err) {
|
if err != nil && !cgroups.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error {
|
||||||
|
// This has to be done separately because it has special constraints (it
|
||||||
|
// can't be done after there are processes attached to the cgroup).
|
||||||
|
if cgroup.Resources.KernelMemory > 0 {
|
||||||
|
if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,12 +81,6 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if cgroup.Resources.KernelMemory > 0 {
|
|
||||||
if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cgroup.Resources.OomKillDisable {
|
if cgroup.Resources.OomKillDisable {
|
||||||
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
|
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -157,6 +162,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
|
||||||
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
|
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
|
||||||
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
|
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
|
||||||
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
|
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
|
||||||
|
limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
|
||||||
|
|
||||||
value, err := getCgroupParamUint(path, usage)
|
value, err := getCgroupParamUint(path, usage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -182,6 +188,14 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
|
||||||
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
|
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
|
||||||
}
|
}
|
||||||
memoryData.Failcnt = value
|
memoryData.Failcnt = value
|
||||||
|
value, err = getCgroupParamUint(path, limit)
|
||||||
|
if err != nil {
|
||||||
|
if moduleName != "memory" && os.IsNotExist(err) {
|
||||||
|
return cgroups.MemoryData{}, nil
|
||||||
|
}
|
||||||
|
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
|
||||||
|
}
|
||||||
|
memoryData.Limit = value
|
||||||
|
|
||||||
return memoryData, nil
|
return memoryData, nil
|
||||||
}
|
}
|
||||||
339 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_test.go vendored Normal file
@@ -0,0 +1,339 @@
// +build linux

package fs

import (
    "strconv"
    "testing"

    "github.com/opencontainers/runc/libcontainer/cgroups"
)

const (
    memoryStatContents = `cache 512
rss 1024`
    memoryUsageContents    = "2048\n"
    memoryMaxUsageContents = "4096\n"
    memoryFailcnt          = "100\n"
    memoryLimitContents    = "8192\n"
)

func TestMemorySetMemory(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()

    const (
        memoryBefore      = 314572800 // 300M
        memoryAfter       = 524288000 // 500M
        reservationBefore = 209715200 // 200M
        reservationAfter  = 314572800 // 300M
    )

    helper.writeFileContents(map[string]string{
        "memory.limit_in_bytes":      strconv.Itoa(memoryBefore),
        "memory.soft_limit_in_bytes": strconv.Itoa(reservationBefore),
    })

    helper.CgroupData.config.Resources.Memory = memoryAfter
    helper.CgroupData.config.Resources.MemoryReservation = reservationAfter
    memory := &MemoryGroup{}
    if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "memory.limit_in_bytes")
    if err != nil {
        t.Fatalf("Failed to parse memory.limit_in_bytes - %s", err)
    }
    if value != memoryAfter {
        t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
    }

    value, err = getCgroupParamUint(helper.CgroupPath, "memory.soft_limit_in_bytes")
    if err != nil {
        t.Fatalf("Failed to parse memory.soft_limit_in_bytes - %s", err)
    }
    if value != reservationAfter {
        t.Fatal("Got the wrong value, set memory.soft_limit_in_bytes failed.")
    }
}

func TestMemorySetMemoryswap(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()

    const (
        memoryswapBefore = 314572800 // 300M
        memoryswapAfter  = 524288000 // 500M
    )

    helper.writeFileContents(map[string]string{
        "memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
    })

    helper.CgroupData.config.Resources.MemorySwap = memoryswapAfter
    memory := &MemoryGroup{}
    if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "memory.memsw.limit_in_bytes")
    if err != nil {
        t.Fatalf("Failed to parse memory.memsw.limit_in_bytes - %s", err)
    }
    if value != memoryswapAfter {
        t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
    }
}

func TestMemorySetKernelMemory(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()

    const (
        kernelMemoryBefore = 314572800 // 300M
        kernelMemoryAfter  = 524288000 // 500M
    )

    helper.writeFileContents(map[string]string{
        "memory.kmem.limit_in_bytes": strconv.Itoa(kernelMemoryBefore),
    })

    helper.CgroupData.config.Resources.KernelMemory = kernelMemoryAfter
    memory := &MemoryGroup{}
    if err := memory.SetKernelMemory(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "memory.kmem.limit_in_bytes")
    if err != nil {
        t.Fatalf("Failed to parse memory.kmem.limit_in_bytes - %s", err)
    }
    if value != kernelMemoryAfter {
        t.Fatal("Got the wrong value, set memory.kmem.limit_in_bytes failed.")
    }
}

func TestMemorySetMemorySwappinessDefault(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()

    const (
        swappinessBefore = 60 //deafult is 60
        swappinessAfter  = 0
    )

    helper.writeFileContents(map[string]string{
        "memory.swappiness": strconv.Itoa(swappinessBefore),
    })

    helper.CgroupData.config.Resources.Memory = swappinessAfter
    memory := &MemoryGroup{}
    if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "memory.swappiness")
    if err != nil {
        t.Fatalf("Failed to parse memory.swappiness - %s", err)
    }
    if value != swappinessAfter {
        t.Fatal("Got the wrong value, set memory.swappiness failed.")
    }
}

func TestMemoryStats(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":                     memoryStatContents,
        "memory.usage_in_bytes":           memoryUsageContents,
        "memory.limit_in_bytes":           memoryLimitContents,
        "memory.max_usage_in_bytes":       memoryMaxUsageContents,
        "memory.failcnt":                  memoryFailcnt,
        "memory.memsw.usage_in_bytes":     memoryUsageContents,
        "memory.memsw.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.memsw.failcnt":            memoryFailcnt,
        "memory.memsw.limit_in_bytes":     memoryLimitContents,
        "memory.kmem.usage_in_bytes":      memoryUsageContents,
        "memory.kmem.max_usage_in_bytes":  memoryMaxUsageContents,
        "memory.kmem.failcnt":             memoryFailcnt,
        "memory.kmem.limit_in_bytes":      memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err != nil {
        t.Fatal(err)
    }
    expectedStats := cgroups.MemoryStats{Cache: 512, Usage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, SwapUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, KernelUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
    expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
}

func TestMemoryStatsNoStatFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.usage_in_bytes":     memoryUsageContents,
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.limit_in_bytes":     memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err != nil {
        t.Fatal(err)
    }
}

func TestMemoryStatsNoUsageFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               memoryStatContents,
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.limit_in_bytes":     memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":           memoryStatContents,
        "memory.usage_in_bytes": memoryUsageContents,
        "memory.limit_in_bytes": memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsNoLimitInBytesFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               memoryStatContents,
        "memory.usage_in_bytes":     memoryUsageContents,
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsBadStatFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               "rss rss",
        "memory.usage_in_bytes":     memoryUsageContents,
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.limit_in_bytes":     memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsBadUsageFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               memoryStatContents,
        "memory.usage_in_bytes":     "bad",
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.limit_in_bytes":     memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               memoryStatContents,
        "memory.usage_in_bytes":     memoryUsageContents,
        "memory.max_usage_in_bytes": "bad",
        "memory.limit_in_bytes":     memoryLimitContents,
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemoryStatsBadLimitInBytesFile(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()
    helper.writeFileContents(map[string]string{
        "memory.stat":               memoryStatContents,
        "memory.usage_in_bytes":     memoryUsageContents,
        "memory.max_usage_in_bytes": memoryMaxUsageContents,
        "memory.limit_in_bytes":     "bad",
    })

    memory := &MemoryGroup{}
    actualStats := *cgroups.NewStats()
    err := memory.GetStats(helper.CgroupPath, &actualStats)
    if err == nil {
        t.Fatal("Expected failure")
    }
}

func TestMemorySetOomControl(t *testing.T) {
    helper := NewCgroupTestUtil("memory", t)
    defer helper.cleanup()

    const (
        oom_kill_disable = 1 // disable oom killer, default is 0
    )

    helper.writeFileContents(map[string]string{
        "memory.oom_control": strconv.Itoa(oom_kill_disable),
    })

    memory := &MemoryGroup{}
    if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "memory.oom_control")
    if err != nil {
        t.Fatalf("Failed to parse memory.oom_control - %s", err)
    }

    if value != oom_kill_disable {
        t.Fatalf("Got the wrong value, set memory.oom_control failed.")
    }
}
@@ -15,15 +15,10 @@ func (s *NetClsGroup) Name() string {
}

func (s *NetClsGroup) Apply(d *cgroupData) error {
-   dir, err := d.join("net_cls")
+   _, err := d.join("net_cls")
    if err != nil && !cgroups.IsNotFound(err) {
        return err
    }

-   if err := s.Set(dir, d.config); err != nil {
-       return err
-   }
-
    return nil
}

38 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls_test.go vendored Normal file
@@ -0,0 +1,38 @@
// +build linux

package fs

import (
    "testing"
)

const (
    classidBefore = "0x100002"
    classidAfter  = "0x100001"
)

func TestNetClsSetClassid(t *testing.T) {
    helper := NewCgroupTestUtil("net_cls", t)
    defer helper.cleanup()

    helper.writeFileContents(map[string]string{
        "net_cls.classid": classidBefore,
    })

    helper.CgroupData.config.Resources.NetClsClassid = classidAfter
    netcls := &NetClsGroup{}
    if err := netcls.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    // As we are in mock environment, we can't get correct value of classid from
    // net_cls.classid.
    // So. we just judge if we successfully write classid into file
    value, err := getCgroupParamString(helper.CgroupPath, "net_cls.classid")
    if err != nil {
        t.Fatalf("Failed to parse net_cls.classid - %s", err)
    }
    if value != classidAfter {
        t.Fatal("Got the wrong value, set net_cls.classid failed.")
    }
}
@@ -15,15 +15,10 @@ func (s *NetPrioGroup) Name() string {
}

func (s *NetPrioGroup) Apply(d *cgroupData) error {
-   dir, err := d.join("net_prio")
+   _, err := d.join("net_prio")
    if err != nil && !cgroups.IsNotFound(err) {
        return err
    }

-   if err := s.Set(dir, d.config); err != nil {
-       return err
-   }
-
    return nil
}

38 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio_test.go vendored Normal file
@@ -0,0 +1,38 @@
// +build linux

package fs

import (
    "strings"
    "testing"

    "github.com/opencontainers/runc/libcontainer/configs"
)

var (
    prioMap = []*configs.IfPrioMap{
        {
            Interface: "test",
            Priority:  5,
        },
    }
)

func TestNetPrioSetIfPrio(t *testing.T) {
    helper := NewCgroupTestUtil("net_prio", t)
    defer helper.cleanup()

    helper.CgroupData.config.Resources.NetPrioIfpriomap = prioMap
    netPrio := &NetPrioGroup{}
    if err := netPrio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamString(helper.CgroupPath, "net_prio.ifpriomap")
    if err != nil {
        t.Fatalf("Failed to parse net_prio.ifpriomap - %s", err)
    }
    if !strings.Contains(value, "test 5") {
        t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.")
    }
}
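Note: as the assertion above implies, each net_prio.ifpriomap entry is written in the form "<interface> <priority>" (the test expects "test 5"). A small, hedged sketch of rendering such an entry, using a local stand-in type rather than the vendored configs.IfPrioMap:

package main

import "fmt"

// ifPrioMap is a local stand-in for configs.IfPrioMap: an interface name
// paired with a network priority.
type ifPrioMap struct {
    Interface string
    Priority  int64
}

// cgroupString renders the "<interface> <priority>" form that the
// net_prio.ifpriomap file expects, matching the "test 5" value the test checks.
func (i ifPrioMap) cgroupString() string {
    return fmt.Sprintf("%s %d", i.Interface, i.Priority)
}

func main() {
    entry := ifPrioMap{Interface: "test", Priority: 5}
    fmt.Println(entry.cgroupString()) // prints: test 5
}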
57 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go vendored Normal file
@@ -0,0 +1,57 @@
// +build linux

package fs

import (
    "fmt"
    "strconv"

    "github.com/opencontainers/runc/libcontainer/cgroups"
    "github.com/opencontainers/runc/libcontainer/configs"
)

type PidsGroup struct {
}

func (s *PidsGroup) Name() string {
    return "pids"
}

func (s *PidsGroup) Apply(d *cgroupData) error {
    _, err := d.join("pids")
    if err != nil && !cgroups.IsNotFound(err) {
        return err
    }
    return nil
}

func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
    if cgroup.Resources.PidsLimit != 0 {
        // "max" is the fallback value.
        limit := "max"

        if cgroup.Resources.PidsLimit > 0 {
            limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
        }

        if err := writeFile(path, "pids.max", limit); err != nil {
            return err
        }
    }

    return nil
}

func (s *PidsGroup) Remove(d *cgroupData) error {
    return removePath(d.path("pids"))
}

func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
    value, err := getCgroupParamUint(path, "pids.current")
    if err != nil {
        return fmt.Errorf("failed to parse pids.current - %s", err)
    }

    stats.PidsStats.Current = value
    return nil
}
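Note: the new pids subsystem has a single tunable, pids.max, which takes either a positive integer or the literal string "max" for unlimited. The following standalone sketch mirrors the Set logic above against a temporary directory instead of a real cgroup mount; writePidsMax and the paths are illustrative, not part of the vendored package:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
)

// writePidsMax mirrors PidsGroup.Set: a negative limit means "unlimited",
// which the pids controller spells as the literal string "max"; zero means
// "leave the current value alone".
func writePidsMax(cgroupPath string, pidsLimit int64) error {
    if pidsLimit == 0 {
        return nil
    }
    limit := "max"
    if pidsLimit > 0 {
        limit = strconv.FormatInt(pidsLimit, 10)
    }
    return ioutil.WriteFile(filepath.Join(cgroupPath, "pids.max"), []byte(limit), 0700)
}

func main() {
    // Use a temp dir as a stand-in for /sys/fs/cgroup/pids/<container>.
    dir, err := ioutil.TempDir("", "pids-example")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dir)

    if err := writePidsMax(dir, 1024); err != nil {
        fmt.Println(err)
        return
    }
    data, _ := ioutil.ReadFile(filepath.Join(dir, "pids.max"))
    fmt.Println("pids.max =", string(data)) // pids.max = 1024
}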
83 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids_test.go vendored Normal file
@@ -0,0 +1,83 @@
// +build linux

package fs

import (
    "strconv"
    "testing"

    "github.com/opencontainers/runc/libcontainer/cgroups"
)

const (
    maxUnlimited = -1
    maxLimited   = 1024
)

func TestPidsSetMax(t *testing.T) {
    helper := NewCgroupTestUtil("pids", t)
    defer helper.cleanup()

    helper.writeFileContents(map[string]string{
        "pids.max": "max",
    })

    helper.CgroupData.config.Resources.PidsLimit = maxLimited
    pids := &PidsGroup{}
    if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamUint(helper.CgroupPath, "pids.max")
    if err != nil {
        t.Fatalf("Failed to parse pids.max - %s", err)
    }

    if value != maxLimited {
        t.Fatalf("Expected %d, got %d for setting pids.max - limited", maxLimited, value)
    }
}

func TestPidsSetUnlimited(t *testing.T) {
    helper := NewCgroupTestUtil("pids", t)
    defer helper.cleanup()

    helper.writeFileContents(map[string]string{
        "pids.max": strconv.Itoa(maxLimited),
    })

    helper.CgroupData.config.Resources.PidsLimit = maxUnlimited
    pids := &PidsGroup{}
    if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
        t.Fatal(err)
    }

    value, err := getCgroupParamString(helper.CgroupPath, "pids.max")
    if err != nil {
        t.Fatalf("Failed to parse pids.max - %s", err)
    }

    if value != "max" {
        t.Fatalf("Expected %s, got %s for setting pids.max - unlimited", "max", value)
    }
}

func TestPidsStats(t *testing.T) {
    helper := NewCgroupTestUtil("pids", t)
    defer helper.cleanup()

    helper.writeFileContents(map[string]string{
        "pids.current": strconv.Itoa(1337),
        "pids.max":     strconv.Itoa(maxLimited),
    })

    pids := &PidsGroup{}
    stats := *cgroups.NewStats()
    if err := pids.GetStats(helper.CgroupPath, &stats); err != nil {
        t.Fatal(err)
    }

    if stats.PidsStats.Current != 1337 {
        t.Fatalf("Expected %d, got %d for pids.current", 1337, stats.PidsStats.Current)
    }
}
117 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/stats_util_test.go vendored Normal file
@@ -0,0 +1,117 @@
// +build linux

package fs

import (
    "fmt"
    "testing"

    "github.com/Sirupsen/logrus"
    "github.com/opencontainers/runc/libcontainer/cgroups"
)

func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
    if len(expected) != len(actual) {
        return fmt.Errorf("blkioStatEntries length do not match")
    }
    for i, expValue := range expected {
        actValue := actual[i]
        if expValue != actValue {
            return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
        }
    }
    return nil
}

func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
    if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
        logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
        logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
        logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
        logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
        logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
        logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
        logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
        t.Fail()
    }

    if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
        logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
        t.Fail()
    }
}

func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
    if expected != actual {
        logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
        t.Fail()
    }
}

func expectHugetlbStatEquals(t *testing.T, expected, actual cgroups.HugetlbStats) {
    if expected != actual {
        logrus.Printf("Expected hugetlb stats %v but found %v\n", expected, actual)
        t.Fail()
    }
}

func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
    expectMemoryDataEquals(t, expected.Usage, actual.Usage)
    expectMemoryDataEquals(t, expected.SwapUsage, actual.SwapUsage)
    expectMemoryDataEquals(t, expected.KernelUsage, actual.KernelUsage)

    for key, expValue := range expected.Stats {
        actValue, ok := actual.Stats[key]
        if !ok {
            logrus.Printf("Expected memory stat key %s not found\n", key)
            t.Fail()
        }
        if expValue != actValue {
            logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
            t.Fail()
        }
    }
}

func expectMemoryDataEquals(t *testing.T, expected, actual cgroups.MemoryData) {
    if expected.Usage != actual.Usage {
        logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
        t.Fail()
    }
    if expected.MaxUsage != actual.MaxUsage {
        logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
        t.Fail()
    }
    if expected.Failcnt != actual.Failcnt {
        logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
        t.Fail()
    }
    if expected.Limit != actual.Limit {
        logrus.Printf("Expected memory limit %d but found %d\n", expected.Limit, actual.Limit)
        t.Fail()
    }
}
67 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/util_test.go vendored Normal file
@@ -0,0 +1,67 @@
// +build linux

/*
Utility for testing cgroup operations.

Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs

import (
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "github.com/opencontainers/runc/libcontainer/configs"
)

type cgroupTestUtil struct {
    // cgroup data to use in tests.
    CgroupData *cgroupData

    // Path to the mock cgroup directory.
    CgroupPath string

    // Temporary directory to store mock cgroup filesystem.
    tempDir string
    t       *testing.T
}

// Creates a new test util for the specified subsystem
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
    d := &cgroupData{
        config: &configs.Cgroup{},
    }
    d.config.Resources = &configs.Resources{}
    tempDir, err := ioutil.TempDir("", "cgroup_test")
    if err != nil {
        t.Fatal(err)
    }
    d.root = tempDir
    testCgroupPath := filepath.Join(d.root, subsystem)
    if err != nil {
        t.Fatal(err)
    }

    // Ensure the full mock cgroup path exists.
    err = os.MkdirAll(testCgroupPath, 0755)
    if err != nil {
        t.Fatal(err)
    }
    return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
}

func (c *cgroupTestUtil) cleanup() {
    os.RemoveAll(c.tempDir)
}

// Write the specified contents on the mock of the specified cgroup files.
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
    for file, contents := range fileContents {
        err := writeFile(c.CgroupPath, file, contents)
        if err != nil {
            c.t.Fatal(err)
        }
    }
}
97 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils_test.go vendored Normal file
@@ -0,0 +1,97 @@
// +build linux

package fs

import (
    "io/ioutil"
    "math"
    "os"
    "path/filepath"
    "strconv"
    "testing"
)

const (
    cgroupFile  = "cgroup.file"
    floatValue  = 2048.0
    floatString = "2048"
)

func TestGetCgroupParamsInt(t *testing.T) {
    // Setup tempdir.
    tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tempDir)
    tempFile := filepath.Join(tempDir, cgroupFile)

    // Success.
    err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
    if err != nil {
        t.Fatal(err)
    }
    value, err := getCgroupParamUint(tempDir, cgroupFile)
    if err != nil {
        t.Fatal(err)
    } else if value != floatValue {
        t.Fatalf("Expected %d to equal %f", value, floatValue)
    }

    // Success with new line.
    err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
    if err != nil {
        t.Fatal(err)
    }
    value, err = getCgroupParamUint(tempDir, cgroupFile)
    if err != nil {
        t.Fatal(err)
    } else if value != floatValue {
        t.Fatalf("Expected %d to equal %f", value, floatValue)
    }

    // Success with negative values
    err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755)
    if err != nil {
        t.Fatal(err)
    }
    value, err = getCgroupParamUint(tempDir, cgroupFile)
    if err != nil {
        t.Fatal(err)
    } else if value != 0 {
        t.Fatalf("Expected %d to equal %d", value, 0)
    }

    // Success with negative values lesser than min int64
    s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64)
    err = ioutil.WriteFile(tempFile, []byte(s), 0755)
    if err != nil {
        t.Fatal(err)
    }
    value, err = getCgroupParamUint(tempDir, cgroupFile)
    if err != nil {
        t.Fatal(err)
    } else if value != 0 {
        t.Fatalf("Expected %d to equal %d", value, 0)
    }

    // Not a float.
    err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
    if err != nil {
        t.Fatal(err)
    }
    _, err = getCgroupParamUint(tempDir, cgroupFile)
    if err == nil {
        t.Fatal("Expecting error, got none")
    }

    // Unknown file.
    err = os.Remove(tempFile)
    if err != nil {
        t.Fatal(err)
    }
    _, err = getCgroupParamUint(tempDir, cgroupFile)
    if err == nil {
        t.Fatal("Expecting error, got none")
    }
}
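Note: these tests pin down the parsing contract the fs package relies on: a trailing newline is tolerated, and negative values are clamped to 0 rather than treated as errors. A standalone sketch of a parser with that contract follows; it illustrates the behaviour the tests encode and is not the vendored getCgroupParamUint itself:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseCgroupUint accepts the formats the tests above exercise:
// "2048", "2048\n", and negative values, which are clamped to 0.
func parseCgroupUint(s string) (uint64, error) {
    s = strings.TrimSpace(s)
    v, err := strconv.ParseUint(s, 10, 64)
    if err != nil {
        // Fall back to a signed parse; negative values clamp to 0.
        if i, ierr := strconv.ParseInt(s, 10, 64); ierr == nil && i < 0 {
            return 0, nil
        }
        return 0, err
    }
    return v, nil
}

func main() {
    for _, in := range []string{"2048", "2048\n", "-12345", "not-a-number"} {
        v, err := parseCgroupUint(in)
        fmt.Printf("%-14q -> %d, err=%v\n", in, v, err)
    }
}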
@@ -36,7 +36,9 @@ type MemoryData struct {
    Usage    uint64 `json:"usage,omitempty"`
    MaxUsage uint64 `json:"max_usage,omitempty"`
    Failcnt  uint64 `json:"failcnt"`
+   Limit    uint64 `json:"limit"`
}

type MemoryStats struct {
    // memory used for cache
    Cache uint64 `json:"cache,omitempty"`
@@ -49,6 +51,11 @@ type MemoryStats struct {
    Stats map[string]uint64 `json:"stats,omitempty"`
}

+type PidsStats struct {
+   // number of pids in the cgroup
+   Current uint64 `json:"current,omitempty"`
+}
+
type BlkioStatEntry struct {
    Major uint64 `json:"major,omitempty"`
    Minor uint64 `json:"minor,omitempty"`
@@ -80,6 +87,7 @@ type HugetlbStats struct {
type Stats struct {
    CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
    MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+   PidsStats   PidsStats   `json:"pids_stats,omitempty"`
    BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
    // the map is in the format "size of hugepage: stats of the hugepage"
    HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
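Note: the visible effect of these stats.go changes on the JSON wire format is a new "limit" field under each memory data block and a new "pids_stats" object. A short sketch using local stand-in structs that carry the same JSON tags (not the vendored types themselves):

package main

import (
    "encoding/json"
    "fmt"
)

// Local stand-ins mirroring the JSON tags added above.
type memoryData struct {
    Usage    uint64 `json:"usage,omitempty"`
    MaxUsage uint64 `json:"max_usage,omitempty"`
    Failcnt  uint64 `json:"failcnt"`
    Limit    uint64 `json:"limit"`
}

type pidsStats struct {
    Current uint64 `json:"current,omitempty"`
}

func main() {
    out, _ := json.Marshal(struct {
        Memory memoryData `json:"memory_stats"`
        Pids   pidsStats  `json:"pids_stats"`
    }{
        Memory: memoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192},
        Pids:   pidsStats{Current: 1337},
    })
    fmt.Println(string(out))
}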
Some files were not shown because too many files have changed in this diff.