Remove containerd files

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Michael Crosby 2016-11-07 13:10:09 -08:00
parent 992fdbfd76
commit e115b52ce2
74 changed files with 0 additions and 9757 deletions

@@ -1,136 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: container.proto
// DO NOT EDIT!
/*
Package container is a generated protocol buffer package.
It is generated from these files:
container.proto
It has these top-level messages:
Container
ContainerSpec
*/
package container
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import docker_containerkit_types "github.com/docker/containerkit/api/types/mount"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
type Container struct {
Container *ContainerSpec `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"`
}
func (m *Container) Reset() { *m = Container{} }
func (m *Container) String() string { return proto.CompactTextString(m) }
func (*Container) ProtoMessage() {}
func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} }
func (m *Container) GetContainer() *ContainerSpec {
if m != nil {
return m.Container
}
return nil
}
// Container specifies runtime parameters for a container.
type ContainerSpec struct {
// name must be a unique name to identify the container.
//
// This can be used as a system identifier external to ContainerKit services.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Labels defines labels to be added to the container at creation time. If
// collisions with system labels occur, these labels will be overridden.
//
// This field *must* remain compatible with the Labels field of
// Annotations.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Mounts []docker_containerkit_types.Mount `protobuf:"bytes,3,rep,name=mounts" json:"mounts"`
// Command to run in the container. The first element is a path to the
// executable and the following elements are treated as arguments.
//
// If command is empty, execution will fall back to the image's entrypoint.
//
// Command should only be used when overriding entrypoint.
Command []string `protobuf:"bytes,4,rep,name=command" json:"command,omitempty"`
// Args specifies arguments provided to the image's entrypoint.
//
// If Command and Args are provided, Args will be appended to Command.
Args []string `protobuf:"bytes,5,rep,name=args" json:"args,omitempty"`
// Env specifies the environment variables for the container in NAME=VALUE
// format. These must be compliant with [IEEE Std
// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
Env []string `protobuf:"bytes,6,rep,name=env" json:"env,omitempty"`
// Dir defines the working directory to set for the container process.
Dir string `protobuf:"bytes,7,opt,name=dir,proto3" json:"dir,omitempty"`
// User specifies the user that should be employed to run the container.
//
// Note that the primary group may be specified by appending the group name
// or id to the user name, separated by a `:`. This syntax is
// `<user>:<group>`.
User string `protobuf:"bytes,8,opt,name=user,proto3" json:"user,omitempty"`
// Groups specifies supplementary groups available to the user.
Groups []string `protobuf:"bytes,9,rep,name=groups" json:"groups,omitempty"`
}
func (m *ContainerSpec) Reset() { *m = ContainerSpec{} }
func (m *ContainerSpec) String() string { return proto.CompactTextString(m) }
func (*ContainerSpec) ProtoMessage() {}
func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{1} }
func (m *ContainerSpec) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *ContainerSpec) GetMounts() []docker_containerkit_types.Mount {
if m != nil {
return m.Mounts
}
return nil
}
func init() {
proto.RegisterType((*Container)(nil), "docker.containerkit.v1.Container")
proto.RegisterType((*ContainerSpec)(nil), "docker.containerkit.v1.ContainerSpec")
}
var fileDescriptorContainer = []byte{
// 344 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x91, 0xc1, 0x4a, 0xf3, 0x40,
0x14, 0x85, 0xff, 0x26, 0x6d, 0xfa, 0xe7, 0x16, 0x51, 0x06, 0x29, 0x43, 0x37, 0x86, 0x82, 0xd0,
0x8d, 0x13, 0x5a, 0x37, 0xda, 0x85, 0x0b, 0x8b, 0x0b, 0x41, 0x41, 0xe2, 0x13, 0x4c, 0xd2, 0x31,
0x86, 0x36, 0x33, 0x61, 0x32, 0x29, 0xf4, 0x0d, 0x7d, 0x0a, 0xb7, 0xbe, 0x86, 0xcc, 0x9d, 0xb4,
0x56, 0xe8, 0xc2, 0x4d, 0x38, 0xe7, 0xe4, 0x9e, 0x2f, 0x73, 0x27, 0x70, 0x9a, 0x29, 0x69, 0x78,
0x21, 0x85, 0x66, 0x95, 0x56, 0x46, 0x91, 0xe1, 0x52, 0x65, 0x2b, 0xa1, 0xd9, 0x3e, 0x5f, 0x15,
0x86, 0x6d, 0xa6, 0xa3, 0xab, 0xbc, 0x30, 0xef, 0x4d, 0xca, 0x32, 0x55, 0xc6, 0xb9, 0xca, 0x55,
0x8c, 0xe3, 0x69, 0xf3, 0x86, 0x0e, 0x0d, 0x2a, 0x87, 0x19, 0xcd, 0x0f, 0xc6, 0x1d, 0x31, 0x3e,
0x24, 0xc6, 0xbc, 0x2a, 0x62, 0xb3, 0xad, 0x44, 0x1d, 0x97, 0xaa, 0x91, 0xc6, 0x3d, 0x5d, 0x77,
0xfc, 0x02, 0xe1, 0x62, 0x37, 0x4b, 0x16, 0x10, 0xee, 0x8b, 0xb4, 0x13, 0x75, 0x26, 0x83, 0xd9,
0x25, 0x3b, 0x7e, 0x46, 0xb6, 0x6f, 0xbd, 0x56, 0x22, 0x4b, 0x7e, 0x7a, 0xe3, 0x2f, 0x0f, 0x4e,
0x7e, 0xbd, 0x24, 0x04, 0xba, 0x92, 0x97, 0x02, 0x89, 0x61, 0x82, 0x9a, 0x3c, 0x42, 0xb0, 0xe6,
0xa9, 0x58, 0xd7, 0xd4, 0x8b, 0xfc, 0xc9, 0x60, 0x36, 0xfd, 0xd3, 0x77, 0xd8, 0x13, 0x76, 0x1e,
0xa4, 0xd1, 0xdb, 0xa4, 0x05, 0x90, 0x3b, 0x08, 0x70, 0xa3, 0x9a, 0xfa, 0x88, 0x8a, 0x8e, 0xa2,
0xf0, 0x02, 0xd8, 0xb3, 0x1d, 0xbc, 0xef, 0x7e, 0x7c, 0x5e, 0xfc, 0x4b, 0xda, 0x16, 0xa1, 0xd0,
0xcf, 0x54, 0x59, 0x72, 0xb9, 0xa4, 0xdd, 0xc8, 0x9f, 0x84, 0xc9, 0xce, 0xda, 0x83, 0x73, 0x9d,
0xd7, 0xb4, 0x87, 0x31, 0x6a, 0x72, 0x06, 0xbe, 0x90, 0x1b, 0x1a, 0x60, 0x64, 0xa5, 0x4d, 0x96,
0x85, 0xa6, 0x7d, 0xdc, 0xce, 0x4a, 0xdb, 0x6b, 0x6a, 0xa1, 0xe9, 0x7f, 0xb7, 0xb0, 0xd5, 0x64,
0x08, 0x41, 0xae, 0x55, 0x53, 0xd5, 0x34, 0xc4, 0x6a, 0xeb, 0x46, 0xb7, 0x30, 0x38, 0x58, 0xca,
0xc2, 0x56, 0x62, 0xdb, 0x5e, 0x95, 0x95, 0xe4, 0x1c, 0x7a, 0x1b, 0xbe, 0x6e, 0x04, 0xf5, 0x30,
0x73, 0x66, 0xee, 0xdd, 0x74, 0xd2, 0x00, 0x7f, 0xe1, 0xf5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff,
0x4a, 0x36, 0x9c, 0x33, 0x58, 0x02, 0x00, 0x00,
}

@@ -1,162 +0,0 @@
syntax = "proto3";
package docker.containerkit.v1;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/docker/containerkit/api/types/mount/mount.proto";
service Containers {
rpc Create(CreateRequest) returns (CreateResponse);
rpc Start(StartRequest) returns (StartResponse);
rpc Stop(StopRequest) returns (StopResponse);
rpc Delete(DeleteRequest) returns (DeleteResponse);
rpc List(ListRequest) returns (ListResponse);
rpc State(StateRequest) returns (StateResponse);
rpc Exec(ExecRequest) returns (ExecResponse);
rpc Update(UpdateRequest) returns (UpdateResponse);
}
message Container {
ContainerSpec container = 1;
// Runtime properties go here.
}
// Container specifies runtime parameters for a container.
message ContainerSpec {
// name must be a unique name to identify the container.
//
// This can be used as a system identifier external to ContainerKit services.
string name = 1;
// Labels defines labels to be added to the container at creation time. If
// collisions with system labels occur, these labels will be overridden.
//
// This field *must* remain compatible with the Labels field of
// Annotations.
map<string, string> labels = 2;
repeated types.Mount mounts = 3 [(gogoproto.nullable) = false];
// Command to run in the container. The first element is a path to the
// executable and the following elements are treated as arguments.
//
// If command is empty, execution will fall back to the image's entrypoint.
//
// Command should only be used when overriding entrypoint.
repeated string command = 4;
// Args specifies arguments provided to the image's entrypoint.
//
// If Command and Args are provided, Args will be appended to Command.
repeated string args = 5;
// Env specifies the environment variables for the container in NAME=VALUE
// format. These must be compliant with [IEEE Std
// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
repeated string env = 6;
// Dir defines the working directory to set for the container process.
string dir = 7;
// User specifies the user that should be employed to run the container.
//
// Note that the primary group may be specified by appending the group name
// or id to the user name, separated by a `:`. This syntax is
// `<user>:<group>`.
string user = 8;
// Groups specifies supplementary groups available to the user.
repeated string groups = 9;
}
message Rlimit {
string type = 1;
uint64 soft = 2;
uint64 hard = 3;
}
message User {
uint32 uid = 1;
uint32 gid = 2;
repeated uint32 additionalGids = 3;
}
message CreateRequest {
string id = 1;
string image = 2;
repeated string args = 3;
repeated string env = 4;
}
message CreateResponse {
Container container = 1;
}
message StartRequest {
string id = 1;
}
message StartResponse {
}
message StopRequest {
string id = 1;
uint32 signal = 2;
uint32 timeout = 3;
}
message StopResponse {
}
message DeleteRequest {
string id = 1;
}
message DeleteResponse {
}
message ListRequest {
}
message ListResponse {
repeated Container containers = 1;
}
message StateRequest {
string id = 1;
}
message StateResponse {
Container container = 1;
}
message ExecRequest {
string id = 1;
bool terminal = 2;
User user = 3;
repeated string args = 4;
repeated string env = 5;
string cwd = 6;
string pid = 7;
repeated string capabilities = 8;
string apparmorProfile = 9;
string selinuxLabel = 10;
bool noNewPrivileges = 11;
repeated Rlimit rlimits = 12;
}
message ExecResponse {
}
message UpdateRequest {
string id = 1;
}
message UpdateResponse {
}
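
As a concrete illustration of the fields above, a spec could be built from the generated Go types in package container. This is a minimal sketch; every value here (name, labels, command, user) is invented:

spec := &container.ContainerSpec{
Name:    "redis-01",                        // unique system identifier
Labels:  map[string]string{"tier": "db"},   // merged with, and overridden by, system labels
Command: []string{"/usr/bin/redis-server"}, // overrides the image entrypoint
Args:    []string{"--appendonly", "yes"},   // appended after Command
Env:     []string{"TZ=UTC"},                // NAME=VALUE, per IEEE Std 1003.1-2001
Dir:     "/data",                           // working directory for the container process
User:    "redis:redis",                     // the <user>:<group> form described above
Groups:  []string{"tty"},                   // supplementary groups
}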

@@ -1,3 +0,0 @@
package container
//go:generate protoc -I .:../../..:$GOPATH/src --gogo_out=plugins=grpc,import_path=github.com/docker/containerkit/api/container:. container.proto

@@ -1,437 +0,0 @@
package server
import (
"errors"
"fmt"
"syscall"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"github.com/docker/containerd"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/containerd/osutils"
"github.com/docker/containerd/runtime"
"github.com/docker/containerd/supervisor"
"github.com/golang/protobuf/ptypes"
"golang.org/x/net/context"
)
type apiServer struct {
sv *supervisor.Supervisor
}
// NewServer returns grpc server instance
func NewServer(sv *supervisor.Supervisor) types.APIServer {
return &apiServer{
sv: sv,
}
}
func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) {
return &types.GetServerVersionResponse{
Major: containerd.VersionMajor,
Minor: containerd.VersionMinor,
Patch: containerd.VersionPatch,
Revision: containerd.GitCommit,
}, nil
}
func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) {
if c.BundlePath == "" {
return nil, errors.New("empty bundle path")
}
e := &supervisor.StartTask{}
e.ID = c.Id
e.BundlePath = c.BundlePath
e.Stdin = c.Stdin
e.Stdout = c.Stdout
e.Stderr = c.Stderr
e.Labels = c.Labels
e.NoPivotRoot = c.NoPivotRoot
e.Runtime = c.Runtime
e.RuntimeArgs = c.RuntimeArgs
e.StartResponse = make(chan supervisor.StartResponse, 1)
if c.Checkpoint != "" {
e.CheckpointDir = c.CheckpointDir
e.Checkpoint = &runtime.Checkpoint{
Name: c.Checkpoint,
}
}
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
r := <-e.StartResponse
apiC, err := createAPIContainer(r.Container, false)
if err != nil {
return nil, err
}
return &types.CreateContainerResponse{
Container: apiC,
}, nil
}
func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) {
e := &supervisor.CreateCheckpointTask{}
e.ID = r.Id
e.CheckpointDir = r.CheckpointDir
e.Checkpoint = &runtime.Checkpoint{
Name: r.Checkpoint.Name,
Exit: r.Checkpoint.Exit,
TCP: r.Checkpoint.Tcp,
UnixSockets: r.Checkpoint.UnixSockets,
Shell: r.Checkpoint.Shell,
EmptyNS: r.Checkpoint.EmptyNS,
}
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
return &types.CreateCheckpointResponse{}, nil
}
func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) {
if r.Name == "" {
return nil, errors.New("checkpoint name cannot be empty")
}
e := &supervisor.DeleteCheckpointTask{}
e.ID = r.Id
e.CheckpointDir = r.CheckpointDir
e.Checkpoint = &runtime.Checkpoint{
Name: r.Name,
}
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
return &types.DeleteCheckpointResponse{}, nil
}
func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) {
e := &supervisor.GetContainersTask{}
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
var container runtime.Container
for _, c := range e.Containers {
if c.ID() == r.Id {
container = c
break
}
}
if container == nil {
return nil, grpc.Errorf(codes.NotFound, "no such container")
}
var out []*types.Checkpoint
checkpoints, err := container.Checkpoints(r.CheckpointDir)
if err != nil {
return nil, err
}
for _, c := range checkpoints {
out = append(out, &types.Checkpoint{
Name: c.Name,
Tcp: c.TCP,
Shell: c.Shell,
UnixSockets: c.UnixSockets,
// TODO: figure out timestamp
//Timestamp: c.Timestamp,
})
}
return &types.ListCheckpointResponse{Checkpoints: out}, nil
}
func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) {
e := &supervisor.SignalTask{}
e.ID = r.Id
e.PID = r.Pid
e.Signal = syscall.Signal(int(r.Signal))
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
return &types.SignalResponse{}, nil
}
func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) {
getState := func(c runtime.Container) (interface{}, error) {
return createAPIContainer(c, true)
}
e := &supervisor.GetContainersTask{}
e.ID = r.Id
e.GetState = getState
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
m := s.sv.Machine()
state := &types.StateResponse{
Machine: &types.Machine{
Cpus: uint32(m.Cpus),
Memory: uint64(m.Memory),
},
}
for idx := range e.Containers {
state.Containers = append(state.Containers, e.States[idx].(*types.Container))
}
return state, nil
}
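// createAPIContainer converts a runtime.Container into its wire-level
// representation, optionally collecting live pids while it is running or paused.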
func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) {
processes, err := c.Processes()
if err != nil {
return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error())
}
var procs []*types.Process
for _, p := range processes {
oldProc := p.Spec()
stdio := p.Stdio()
proc := &types.Process{
Pid: p.ID(),
SystemPid: uint32(p.SystemPid()),
Terminal: oldProc.Terminal,
Args: oldProc.Args,
Env: oldProc.Env,
Cwd: oldProc.Cwd,
Stdin: stdio.Stdin,
Stdout: stdio.Stdout,
Stderr: stdio.Stderr,
}
proc.User = &types.User{
Uid: oldProc.User.UID,
Gid: oldProc.User.GID,
AdditionalGids: oldProc.User.AdditionalGids,
}
proc.Capabilities = oldProc.Capabilities
proc.ApparmorProfile = oldProc.ApparmorProfile
proc.SelinuxLabel = oldProc.SelinuxLabel
proc.NoNewPrivileges = oldProc.NoNewPrivileges
for _, rl := range oldProc.Rlimits {
proc.Rlimits = append(proc.Rlimits, &types.Rlimit{
Type: rl.Type,
Soft: rl.Soft,
Hard: rl.Hard,
})
}
procs = append(procs, proc)
}
var pids []int
state := c.State()
if getPids && (state == runtime.Running || state == runtime.Paused) {
if pids, err = c.Pids(); err != nil {
return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error())
}
}
return &types.Container{
Id: c.ID(),
BundlePath: c.Path(),
Processes: procs,
Labels: c.Labels(),
Status: string(state),
Pids: toUint32(pids),
Runtime: c.Runtime(),
}, nil
}
func toUint32(its []int) []uint32 {
o := []uint32{}
for _, i := range its {
o = append(o, uint32(i))
}
return o
}
func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) {
e := &supervisor.UpdateTask{}
e.ID = r.Id
e.State = runtime.State(r.Status)
if r.Resources != nil {
rs := r.Resources
e.Resources = &runtime.Resource{}
if rs.CpuShares != 0 {
e.Resources.CPUShares = int64(rs.CpuShares)
}
if rs.BlkioWeight != 0 {
e.Resources.BlkioWeight = uint16(rs.BlkioWeight)
}
if rs.CpuPeriod != 0 {
e.Resources.CPUPeriod = int64(rs.CpuPeriod)
}
if rs.CpuQuota != 0 {
e.Resources.CPUQuota = int64(rs.CpuQuota)
}
if rs.CpusetCpus != "" {
e.Resources.CpusetCpus = rs.CpusetCpus
}
if rs.CpusetMems != "" {
e.Resources.CpusetMems = rs.CpusetMems
}
if rs.KernelMemoryLimit != 0 {
e.Resources.KernelMemory = int64(rs.KernelMemoryLimit)
}
if rs.KernelTCPMemoryLimit != 0 {
e.Resources.KernelTCPMemory = int64(rs.KernelTCPMemoryLimit)
}
if rs.MemoryLimit != 0 {
e.Resources.Memory = int64(rs.MemoryLimit)
}
if rs.MemoryReservation != 0 {
e.Resources.MemoryReservation = int64(rs.MemoryReservation)
}
if rs.MemorySwap != 0 {
e.Resources.MemorySwap = int64(rs.MemorySwap)
}
}
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
return &types.UpdateContainerResponse{}, nil
}
func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) {
e := &supervisor.UpdateProcessTask{}
e.ID = r.Id
e.PID = r.Pid
e.Height = int(r.Height)
e.Width = int(r.Width)
e.CloseStdin = r.CloseStdin
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
return &types.UpdateProcessResponse{}, nil
}
func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error {
t := time.Time{}
if r.Timestamp != nil {
from, err := ptypes.Timestamp(r.Timestamp)
if err != nil {
return err
}
t = from
}
if r.StoredOnly && t.IsZero() {
return fmt.Errorf("invalid parameter: StoredOnly cannot be specified without setting a valid Timestamp")
}
events := s.sv.Events(t, r.StoredOnly, r.Id)
defer s.sv.Unsubscribe(events)
for e := range events {
tsp, err := ptypes.TimestampProto(e.Timestamp)
if err != nil {
return err
}
if r.Id == "" || e.ID == r.Id {
if err := stream.Send(&types.Event{
Id: e.ID,
Type: e.Type,
Timestamp: tsp,
Pid: e.PID,
Status: uint32(e.Status),
}); err != nil {
return err
}
}
}
return nil
}
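// convertToPb maps an internal runtime.Stat snapshot onto the wire-level
// StatsResponse message, one cgroup subsystem at a time.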
func convertToPb(st *runtime.Stat) *types.StatsResponse {
tsp, _ := ptypes.TimestampProto(st.Timestamp)
pbSt := &types.StatsResponse{
Timestamp: tsp,
CgroupStats: &types.CgroupStats{},
}
systemUsage, _ := osutils.GetSystemCPUUsage()
pbSt.CgroupStats.CpuStats = &types.CpuStats{
CpuUsage: &types.CpuUsage{
TotalUsage: st.CPU.Usage.Total,
PercpuUsage: st.CPU.Usage.Percpu,
UsageInKernelmode: st.CPU.Usage.Kernel,
UsageInUsermode: st.CPU.Usage.User,
},
ThrottlingData: &types.ThrottlingData{
Periods: st.CPU.Throttling.Periods,
ThrottledPeriods: st.CPU.Throttling.ThrottledPeriods,
ThrottledTime: st.CPU.Throttling.ThrottledTime,
},
SystemUsage: systemUsage,
}
pbSt.CgroupStats.MemoryStats = &types.MemoryStats{
Cache: st.Memory.Cache,
Usage: &types.MemoryData{
Usage: st.Memory.Usage.Usage,
MaxUsage: st.Memory.Usage.Max,
Failcnt: st.Memory.Usage.Failcnt,
Limit: st.Memory.Usage.Limit,
},
SwapUsage: &types.MemoryData{
Usage: st.Memory.Swap.Usage,
MaxUsage: st.Memory.Swap.Max,
Failcnt: st.Memory.Swap.Failcnt,
Limit: st.Memory.Swap.Limit,
},
KernelUsage: &types.MemoryData{
Usage: st.Memory.Kernel.Usage,
MaxUsage: st.Memory.Kernel.Max,
Failcnt: st.Memory.Kernel.Failcnt,
Limit: st.Memory.Kernel.Limit,
},
Stats: st.Memory.Raw,
}
pbSt.CgroupStats.BlkioStats = &types.BlkioStats{
IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: convertBlkioEntryToPb(st.Blkio.IoServicedRecursive),
IoQueuedRecursive: convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: convertBlkioEntryToPb(st.Blkio.IoMergedRecursive),
IoTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoTimeRecursive),
SectorsRecursive: convertBlkioEntryToPb(st.Blkio.SectorsRecursive),
}
pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats)
for k, st := range st.Hugetlb {
pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{
Usage: st.Usage,
MaxUsage: st.Max,
Failcnt: st.Failcnt,
}
}
pbSt.CgroupStats.PidsStats = &types.PidsStats{
Current: st.Pids.Current,
Limit: st.Pids.Limit,
}
return pbSt
}
func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry {
var pbEs []*types.BlkioStatsEntry
for _, e := range b {
pbEs = append(pbEs, &types.BlkioStatsEntry{
Major: e.Major,
Minor: e.Minor,
Op: e.Op,
Value: e.Value,
})
}
return pbEs
}
func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) {
e := &supervisor.StatsTask{}
e.ID = r.Id
e.Stat = make(chan *runtime.Stat, 1)
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
stats := <-e.Stat
t := convertToPb(stats)
return t, nil
}
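
Every handler above follows the same supervisor pattern: populate a task, hand it to SendTask, then block on ErrorCh. From the other side of the socket the calls go through the generated stubs; a rough client sketch, assuming the generated types.NewAPIClient constructor and an already-dialed conn (see main_linux.go below), with invented paths:

client := types.NewAPIClient(conn)
resp, err := client.CreateContainer(context.Background(), &types.CreateContainerRequest{
Id:         "test-1",
BundlePath: "/containers/test-1", // hypothetical OCI bundle
Stdout:     "/tmp/test-1-stdout", // file the daemon writes stdout to
})
if err != nil {
log.Fatal(err)
}
fmt.Println("created:", resp.Container.Id)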

@@ -1,56 +0,0 @@
package server
import (
"fmt"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/containerd/specs"
"github.com/docker/containerd/supervisor"
ocs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/net/context"
)
func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) {
process := &specs.ProcessSpec{
Terminal: r.Terminal,
Args: r.Args,
Env: r.Env,
Cwd: r.Cwd,
}
process.User = ocs.User{
UID: r.User.Uid,
GID: r.User.Gid,
AdditionalGids: r.User.AdditionalGids,
}
process.Capabilities = r.Capabilities
process.ApparmorProfile = r.ApparmorProfile
process.SelinuxLabel = r.SelinuxLabel
process.NoNewPrivileges = r.NoNewPrivileges
for _, rl := range r.Rlimits {
process.Rlimits = append(process.Rlimits, ocs.Rlimit{
Type: rl.Type,
Soft: rl.Soft,
Hard: rl.Hard,
})
}
if r.Id == "" {
return nil, fmt.Errorf("container id cannot be empty")
}
if r.Pid == "" {
return nil, fmt.Errorf("process id cannot be empty")
}
e := &supervisor.AddProcessTask{}
e.ID = r.Id
e.PID = r.Pid
e.ProcessSpec = process
e.Stdin = r.Stdin
e.Stdout = r.Stdout
e.Stderr = r.Stderr
e.StartResponse = make(chan supervisor.StartResponse, 1)
s.sv.SendTask(e)
if err := <-e.ErrorCh(); err != nil {
return nil, err
}
<-e.StartResponse
return &types.AddProcessResponse{}, nil
}
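
On the wire this is an exec-style call; a hypothetical client invocation against a running container (all values illustrative, reusing the client sketch above):

_, err := client.AddProcess(context.Background(), &types.AddProcessRequest{
Id:     "test-1",                 // container to exec into; must be non-empty
Pid:    "exec-1",                 // caller-chosen process id; must be non-empty
Args:   []string{"/bin/ps", "aux"},
Env:    []string{"PATH=/usr/bin:/bin"},
Cwd:    "/",
User:   &types.User{Uid: 0, Gid: 0},
Stdout: "/tmp/exec-1-stdout",
})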

@@ -1,14 +0,0 @@
package server
import (
"errors"
"github.com/docker/containerd/api/grpc/types"
"golang.org/x/net/context"
)
var clockTicksPerSecond uint64
func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) {
return &types.AddProcessResponse{}, errors.New("apiServer AddProcess() not implemented on Solaris")
}

File diff suppressed because it is too large.

@@ -1,342 +0,0 @@
syntax = "proto3";
package types;
import "google/protobuf/timestamp.proto";
service API {
rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {}
rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {}
rpc Signal(SignalRequest) returns (SignalResponse) {}
rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {}
rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {}
rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {}
rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {}
rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {}
rpc State(StateRequest) returns (StateResponse) {}
rpc Events(EventsRequest) returns (stream Event) {}
rpc Stats(StatsRequest) returns (StatsResponse) {}
}
message GetServerVersionRequest {
}
message GetServerVersionResponse {
uint32 major = 1;
uint32 minor = 2;
uint32 patch = 3;
string revision = 4;
}
message UpdateProcessRequest {
string id = 1;
string pid = 2;
bool closeStdin = 3; // Close stdin of the container
uint32 width = 4;
uint32 height = 5;
}
message UpdateProcessResponse {
}
message CreateContainerRequest {
string id = 1; // ID of container
string bundlePath = 2; // path to OCI bundle
string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional)
string stdin = 4; // path to the file where stdin will be read (optional)
string stdout = 5; // path to file where stdout will be written (optional)
string stderr = 6; // path to file where stderr will be written (optional)
repeated string labels = 7;
bool noPivotRoot = 8;
string runtime = 9;
repeated string runtimeArgs = 10;
string checkpointDir = 11; // Directory where checkpoints are stored
}
message CreateContainerResponse {
Container container = 1;
}
message SignalRequest {
string id = 1; // ID of container
string pid = 2; // PID of process inside container
uint32 signal = 3; // Signal to send; see "man 7 signal" for values
}
message SignalResponse {
}
message AddProcessRequest {
string id = 1; // ID of container
bool terminal = 2; // Use tty for container stdio
User user = 3; // User under which process will be run
repeated string args = 4; // Arguments for the process; the first is the binary path itself
repeated string env = 5; // List of environment variables for process
string cwd = 6; // Working directory of the process
string pid = 7; // Process ID
string stdin = 8; // path to the file where stdin will be read (optional)
string stdout = 9; // path to file where stdout will be written (optional)
string stderr = 10; // path to file where stderr will be written (optional)
repeated string capabilities = 11;
string apparmorProfile = 12;
string selinuxLabel = 13;
bool noNewPrivileges = 14;
repeated Rlimit rlimits = 15;
}
message Rlimit {
string type = 1;
uint64 soft = 2;
uint64 hard = 3;
}
message User {
uint32 uid = 1; // UID of user
uint32 gid = 2; // GID of user
repeated uint32 additionalGids = 3; // Additional groups to which user will be added
}
message AddProcessResponse {
}
message CreateCheckpointRequest {
string id = 1; // ID of container
Checkpoint checkpoint = 2; // Checkpoint configuration
string checkpointDir = 3; // Directory where checkpoints are stored
}
message CreateCheckpointResponse {
}
message DeleteCheckpointRequest {
string id = 1; // ID of container
string name = 2; // Name of checkpoint
string checkpointDir = 3; // Directory where checkpoints are stored
}
message DeleteCheckpointResponse {
}
message ListCheckpointRequest {
string id = 1; // ID of container
string checkpointDir = 2; // Directory where checkpoints are stored
}
message Checkpoint {
string name = 1; // Name of checkpoint
bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not
bool tcp = 3; // allow open tcp connections
bool unixSockets = 4; // allow external unix sockets
bool shell = 5; // allow shell-jobs
repeated string emptyNS = 6;
}
message ListCheckpointResponse {
repeated Checkpoint checkpoints = 1; // List of checkpoints
}
message StateRequest {
string id = 1; // container id for a single container
}
message ContainerState {
string status = 1;
}
message Process {
string pid = 1;
bool terminal = 2; // Use tty for container stdio
User user = 3; // User under which process will be run
repeated string args = 4; // Arguments for the process; the first is the binary path itself
repeated string env = 5; // List of environment variables for process
string cwd = 6; // Working directory of the process
uint32 systemPid = 7;
string stdin = 8; // path to the file where stdin will be read (optional)
string stdout = 9; // path to file where stdout will be written (optional)
string stderr = 10; // path to file where stderr will be written (optional)
repeated string capabilities = 11;
string apparmorProfile = 12;
string selinuxLabel = 13;
bool noNewPrivileges = 14;
repeated Rlimit rlimits = 15;
}
message Container {
string id = 1; // ID of container
string bundlePath = 2; // Path to OCI bundle
repeated Process processes = 3; // List of processes which run in container
string status = 4; // Container status ("running", "paused", etc.)
repeated string labels = 5;
repeated uint32 pids = 6;
string runtime = 7; // runtime used to execute the container
}
// Machine is information about machine on which containerd is run
message Machine {
uint32 cpus = 1; // number of cpus
uint64 memory = 2; // amount of memory
}
// StateResponse is information about containerd daemon
message StateResponse {
repeated Container containers = 1;
Machine machine = 2;
}
message UpdateContainerRequest {
string id = 1; // ID of container
string pid = 2;
string status = 3; // Status to which containerd will try to change
UpdateResource resources = 4;
}
message UpdateResource {
uint64 blkioWeight = 1;
uint64 cpuShares = 2;
uint64 cpuPeriod = 3;
uint64 cpuQuota = 4;
string cpusetCpus = 5;
string cpusetMems = 6;
uint64 memoryLimit = 7;
uint64 memorySwap = 8;
uint64 memoryReservation = 9;
uint64 kernelMemoryLimit = 10;
uint64 kernelTCPMemoryLimit = 11;
uint64 blkioLeafWeight = 12;
repeated WeightDevice blkioWeightDevice = 13;
repeated ThrottleDevice blkioThrottleReadBpsDevice = 14;
repeated ThrottleDevice blkioThrottleWriteBpsDevice = 15;
repeated ThrottleDevice blkioThrottleReadIopsDevice = 16;
repeated ThrottleDevice blkioThrottleWriteIopsDevice = 17;
}
message BlockIODevice {
int64 major = 1;
int64 minor = 2;
}
message WeightDevice {
BlockIODevice blkIODevice = 1;
uint32 weight = 2;
uint32 leafWeight = 3;
}
message ThrottleDevice {
BlockIODevice blkIODevice = 1;
uint64 rate = 2;
}
message UpdateContainerResponse {
}
message EventsRequest {
// Tag 1 is deprecated (old uint64 timestamp)
google.protobuf.Timestamp timestamp = 2;
bool storedOnly = 3;
string id = 4;
}
message Event {
string type = 1;
string id = 2;
uint32 status = 3;
string pid = 4;
// Tag 5 is deprecated (old uint64 timestamp)
google.protobuf.Timestamp timestamp = 6;
}
message NetworkStats {
string name = 1; // name of network interface
uint64 rx_bytes = 2;
uint64 rx_Packets = 3;
uint64 Rx_errors = 4;
uint64 Rx_dropped = 5;
uint64 Tx_bytes = 6;
uint64 Tx_packets = 7;
uint64 Tx_errors = 8;
uint64 Tx_dropped = 9;
}
message CpuUsage {
uint64 total_usage = 1;
repeated uint64 percpu_usage = 2;
uint64 usage_in_kernelmode = 3;
uint64 usage_in_usermode = 4;
}
message ThrottlingData {
uint64 periods = 1;
uint64 throttled_periods = 2;
uint64 throttled_time = 3;
}
message CpuStats {
CpuUsage cpu_usage = 1;
ThrottlingData throttling_data = 2;
uint64 system_usage = 3;
}
message PidsStats {
uint64 current = 1;
uint64 limit = 2;
}
message MemoryData {
uint64 usage = 1;
uint64 max_usage = 2;
uint64 failcnt = 3;
uint64 limit = 4;
}
message MemoryStats {
uint64 cache = 1;
MemoryData usage = 2;
MemoryData swap_usage = 3;
MemoryData kernel_usage = 4;
map<string, uint64> stats = 5;
}
message BlkioStatsEntry {
uint64 major = 1;
uint64 minor = 2;
string op = 3;
uint64 value = 4;
}
message BlkioStats {
repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
repeated BlkioStatsEntry io_serviced_recursive = 2;
repeated BlkioStatsEntry io_queued_recursive = 3;
repeated BlkioStatsEntry io_service_time_recursive = 4;
repeated BlkioStatsEntry io_wait_time_recursive = 5;
repeated BlkioStatsEntry io_merged_recursive = 6;
repeated BlkioStatsEntry io_time_recursive = 7;
repeated BlkioStatsEntry sectors_recursive = 8;
}
message HugetlbStats {
uint64 usage = 1;
uint64 max_usage = 2;
uint64 failcnt = 3;
uint64 limit = 4;
}
message CgroupStats {
CpuStats cpu_stats = 1;
MemoryStats memory_stats = 2;
BlkioStats blkio_stats = 3;
map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage"
PidsStats pids_stats = 5;
}
message StatsResponse {
repeated NetworkStats network_stats = 1;
CgroupStats cgroup_stats = 2;
// Tag 3 is deprecated (old uint64 timestamp)
google.protobuf.Timestamp timestamp = 4;
};
message StatsRequest {
string id = 1;
}
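
Events is the one server-streaming RPC in this service; a client drains it like any generated gRPC stream until Recv returns an error (io.EOF on clean shutdown). A sketch, reusing the client from above:

events, err := client.Events(context.Background(), &types.EventsRequest{})
if err != nil {
log.Fatal(err)
}
for {
e, err := events.Recv()
if err != nil {
break // io.EOF when the daemon closes the stream
}
fmt.Printf("%s %s status=%d\n", e.Type, e.Id, e.Status)
}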

@@ -1,3 +0,0 @@
package image
//go:generate protoc -I .:../../..:$GOPATH/src --gogo_out=plugins=grpc,import_path=github.com/docker/containerkit/api/image:. image.proto

@@ -1,295 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: image.proto
// DO NOT EDIT!
/*
Package image is a generated protocol buffer package.
It is generated from these files:
image.proto
It has these top-level messages:
PrepareRequest
PrepareResponse
CleanupRequest
CleanupResponse
CommitRequest
CommitResponse
*/
package image
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import docker_containerkit_types "github.com/docker/containerkit/api/types/mount"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
type PrepareRequest struct {
// Path specifies the filesystem path to target for the image preparation.
//
// These will influence the values of "target" in the emitted mounts. It
// must be unique per usage of the prepared mount and can only be prepared
// again after a call to cleanup.
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
// name of the image to prepare.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *PrepareRequest) Reset() { *m = PrepareRequest{} }
func (m *PrepareRequest) String() string { return proto.CompactTextString(m) }
func (*PrepareRequest) ProtoMessage() {}
func (*PrepareRequest) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
type PrepareResponse struct {
// Layers provides a list of mounts to use with container creation. The
// layers will be mounted, in order, assembling the root filesystem.
//
// Typically, these can be augmented with other mounts from the volume
// service, tmpfs, application-specific bind mounts or even mounts from
// other containers.
Layers []*docker_containerkit_types.Mount `protobuf:"bytes,1,rep,name=layers" json:"layers,omitempty"`
}
func (m *PrepareResponse) Reset() { *m = PrepareResponse{} }
func (m *PrepareResponse) String() string { return proto.CompactTextString(m) }
func (*PrepareResponse) ProtoMessage() {}
func (*PrepareResponse) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
func (m *PrepareResponse) GetLayers() []*docker_containerkit_types.Mount {
if m != nil {
return m.Layers
}
return nil
}
type CleanupRequest struct {
// Path cleans up the path used for the image.
// ID identifies the prepared image to cleanup.
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
}
func (m *CleanupRequest) Reset() { *m = CleanupRequest{} }
func (m *CleanupRequest) String() string { return proto.CompactTextString(m) }
func (*CleanupRequest) ProtoMessage() {}
func (*CleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
type CleanupResponse struct {
}
func (m *CleanupResponse) Reset() { *m = CleanupResponse{} }
func (m *CleanupResponse) String() string { return proto.CompactTextString(m) }
func (*CleanupResponse) ProtoMessage() {}
func (*CleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{3} }
// CommitRequest provides argument for the Commit RPC.
type CommitRequest struct {
// Path to a prepared image to capture changes.
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
}
func (m *CommitRequest) Reset() { *m = CommitRequest{} }
func (m *CommitRequest) String() string { return proto.CompactTextString(m) }
func (*CommitRequest) ProtoMessage() {}
func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{4} }
type CommitResponse struct {
// name identifies the entity created as part of the image.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *CommitResponse) Reset() { *m = CommitResponse{} }
func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
func (*CommitResponse) ProtoMessage() {}
func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{5} }
func init() {
proto.RegisterType((*PrepareRequest)(nil), "docker.containerkit.types.PrepareRequest")
proto.RegisterType((*PrepareResponse)(nil), "docker.containerkit.types.PrepareResponse")
proto.RegisterType((*CleanupRequest)(nil), "docker.containerkit.types.CleanupRequest")
proto.RegisterType((*CleanupResponse)(nil), "docker.containerkit.types.CleanupResponse")
proto.RegisterType((*CommitRequest)(nil), "docker.containerkit.types.CommitRequest")
proto.RegisterType((*CommitResponse)(nil), "docker.containerkit.types.CommitResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion2
// Client API for Images service
type ImagesClient interface {
// Prepare declares that an image is required for use. A prepared image,
// complete with a set of mounts to use for the image will be provided.
Prepare(ctx context.Context, in *PrepareRequest, opts ...grpc.CallOption) (*PrepareResponse, error)
// Cleanup instructs the images service to cleanup resources for the image.
Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*CleanupResponse, error)
// Commit
Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error)
}
type imagesClient struct {
cc *grpc.ClientConn
}
func NewImagesClient(cc *grpc.ClientConn) ImagesClient {
return &imagesClient{cc}
}
func (c *imagesClient) Prepare(ctx context.Context, in *PrepareRequest, opts ...grpc.CallOption) (*PrepareResponse, error) {
out := new(PrepareResponse)
err := grpc.Invoke(ctx, "/docker.containerkit.types.Images/Prepare", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *imagesClient) Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*CleanupResponse, error) {
out := new(CleanupResponse)
err := grpc.Invoke(ctx, "/docker.containerkit.types.Images/Cleanup", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *imagesClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) {
out := new(CommitResponse)
err := grpc.Invoke(ctx, "/docker.containerkit.types.Images/Commit", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Images service
type ImagesServer interface {
// Prepare declares that an image is required for use. A prepared image,
// complete with a set of mounts to use for the image will be provided.
Prepare(context.Context, *PrepareRequest) (*PrepareResponse, error)
// Cleanup instructs the images service to cleanup resources for the image.
Cleanup(context.Context, *CleanupRequest) (*CleanupResponse, error)
// Commit
Commit(context.Context, *CommitRequest) (*CommitResponse, error)
}
func RegisterImagesServer(s *grpc.Server, srv ImagesServer) {
s.RegisterService(&_Images_serviceDesc, srv)
}
func _Images_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PrepareRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImagesServer).Prepare(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/docker.containerkit.types.Images/Prepare",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImagesServer).Prepare(ctx, req.(*PrepareRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Images_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CleanupRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImagesServer).Cleanup(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/docker.containerkit.types.Images/Cleanup",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImagesServer).Cleanup(ctx, req.(*CleanupRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Images_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CommitRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImagesServer).Commit(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/docker.containerkit.types.Images/Commit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImagesServer).Commit(ctx, req.(*CommitRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Images_serviceDesc = grpc.ServiceDesc{
ServiceName: "docker.containerkit.types.Images",
HandlerType: (*ImagesServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Prepare",
Handler: _Images_Prepare_Handler,
},
{
MethodName: "Cleanup",
Handler: _Images_Cleanup_Handler,
},
{
MethodName: "Commit",
Handler: _Images_Commit_Handler,
},
},
Streams: []grpc.StreamDesc{},
}
var fileDescriptorImage = []byte{
// 314 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x52, 0x4f, 0x6b, 0x3a, 0x31,
0x14, 0x64, 0xf5, 0xc7, 0xca, 0xef, 0x49, 0x95, 0xe6, 0x64, 0xf7, 0x24, 0xdb, 0x1e, 0xb4, 0xd0,
0x0d, 0xd8, 0x8b, 0xf4, 0xea, 0xa9, 0x94, 0x42, 0xf1, 0x5e, 0x30, 0xda, 0xd7, 0x35, 0x68, 0xfe,
0x34, 0xc9, 0x1e, 0xfc, 0x4c, 0xfd, 0x92, 0x65, 0x93, 0xd4, 0x7f, 0xd0, 0xc5, 0xcb, 0xf2, 0x12,
0x66, 0xe6, 0xcd, 0xcc, 0x06, 0xba, 0x5c, 0xb0, 0x12, 0x0b, 0x6d, 0x94, 0x53, 0xe4, 0xe6, 0x43,
0xad, 0x36, 0x68, 0x8a, 0x95, 0x92, 0x8e, 0x71, 0x89, 0x66, 0xc3, 0x5d, 0xe1, 0x76, 0x1a, 0x6d,
0xf6, 0x50, 0x72, 0xb7, 0xae, 0x96, 0xc5, 0x4a, 0x09, 0x5a, 0xaa, 0x52, 0x51, 0xcf, 0x58, 0x56,
0x9f, 0xfe, 0xe4, 0x0f, 0x7e, 0x0a, 0x4a, 0xd9, 0xd3, 0x11, 0x3c, 0x88, 0xd2, 0x63, 0x51, 0xca,
0x34, 0xa7, 0x5e, 0x98, 0x0a, 0x55, 0x49, 0x17, 0xbe, 0x81, 0x9b, 0x4f, 0xa1, 0xf7, 0x66, 0x50,
0x33, 0x83, 0x73, 0xfc, 0xaa, 0xd0, 0x3a, 0x42, 0xe0, 0x9f, 0x66, 0x6e, 0x3d, 0x68, 0x0f, 0x93,
0xd1, 0xff, 0xb9, 0x9f, 0xeb, 0x3b, 0xc9, 0x04, 0x0e, 0x5a, 0xe1, 0xae, 0x9e, 0xf3, 0x17, 0xe8,
0xef, 0x99, 0x56, 0x2b, 0x69, 0x91, 0x4c, 0x21, 0xdd, 0xb2, 0x1d, 0x1a, 0x3b, 0x48, 0x86, 0xed,
0x51, 0x77, 0x32, 0x2c, 0xfe, 0xcc, 0x58, 0xbc, 0xd6, 0x26, 0xe6, 0x11, 0x9f, 0xdf, 0x41, 0x6f,
0xb6, 0x45, 0x26, 0x2b, 0x7d, 0x6e, 0x23, 0x39, 0xd8, 0xc8, 0xaf, 0xa1, 0xbf, 0x47, 0x85, 0x95,
0xf9, 0x2d, 0x5c, 0xcd, 0x94, 0x10, 0xdc, 0x35, 0xf1, 0x6a, 0xf5, 0x08, 0x8a, 0x4e, 0x7f, 0x03,
0x25, 0x87, 0x40, 0x93, 0xef, 0x16, 0xa4, 0xcf, 0xf5, 0x0f, 0xb2, 0x64, 0x01, 0x9d, 0x98, 0x8d,
0x8c, 0x1b, 0x32, 0x9c, 0x36, 0x97, 0xdd, 0x5f, 0x02, 0x8d, 0x06, 0x16, 0xd0, 0x89, 0x51, 0x1a,
0x37, 0x9c, 0x96, 0xd2, 0xb8, 0xe1, 0xac, 0x19, 0xf2, 0x0e, 0x69, 0x08, 0x4d, 0x46, 0x4d, 0xac,
0xe3, 0xf2, 0xb2, 0xf1, 0x05, 0xc8, 0x20, 0xbf, 0x4c, 0xfd, 0xfb, 0x79, 0xfc, 0x09, 0x00, 0x00,
0xff, 0xff, 0x28, 0x48, 0xab, 0xcd, 0xd4, 0x02, 0x00, 0x00,
}

@@ -1,83 +0,0 @@
syntax = "proto3";
package docker.containerkit.types;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/docker/containerkit/api/types/mount/mount.proto";
// Images abstracts the graph driver and image store.
//
// The interface is to be able to request and manage images. The output,
// provided as part of the Prepare step, is to expose a set of mounts that can
// be used at container startup to run the image.
service Images {
// Prepare declares that an image is required for use. A prepared image,
// complete with a set of mounts to use for the image will be provided.
rpc Prepare(PrepareRequest) returns (PrepareResponse);
// Cleanup instructs the images service to cleanup resources for the image.
rpc Cleanup(CleanupRequest) returns (CleanupResponse);
// Commit
rpc Commit(CommitRequest) returns (CommitResponse);
// NOTE(stevvooe): Placeholders for other operations here. Consider
// splitting this into a graphdriver like service (Prepare/Cleanup) and an
// image store service.
//
// Really, we want to separate image identification from CAS
// identification, so placing push/pull here may cause too much coupling.
// It might better to be able to import the layers here in the same way the
// graphdriver works, then only use the image metadata to maintain the link
// here.
//
// Basically, we want to avoid the tight coupling present between the image
// store and graphdriver in docker today.
//
// rpc Push(PushRequest) returns (stream PullRequest);
// rpc Pull(PullRequest) returns (stream PullResponse);
}
message PrepareRequest {
// Path specifies the filesystem path to target for the image preparation.
//
// These will influence the values of "target" in the emitted mounts. It
// must be unique per usage of the prepared mount and can only be prepared
// again after a call to cleanup.
string path = 3;
// name of the image to prepare.
string name = 2;
}
message PrepareResponse {
// Layers provides a list of mounts to use with container creation. The
// layers will be mounted, in order, assembling the root filesystem.
//
// Typically, these can be augmented with other mounts from the volume
// service, tmpfs, application-specific bind mounts or even mounts from
// other containers.
repeated types.Mount layers = 1;
// TODO(stevvooe): It is unclear whether or not we should integrate image
// metadata with this part of the service.
}
message CleanupRequest {
// Path cleans up the path used for the image.
// ID identifies the prepared image to cleanup.
string path = 1;
}
message CleanupResponse { }
// CommitRequest provides argument for the Commit RPC.
message CommitRequest {
// Path to a prepared image to capture changes.
string path = 1;
}
message CommitResponse {
// name identifies the entity created as part of the image.
string name = 1;
}
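
Tying the lifecycle together with the client generated in image.pb.go above: Prepare yields the layer mounts, Cleanup releases them. A sketch in which ctx and conn are assumed to exist and the target path is invented:

images := image.NewImagesClient(conn)
resp, err := images.Prepare(ctx, &image.PrepareRequest{
Name: "redis",              // image to prepare
Path: "/run/prep/redis-01", // must be unique per preparation
})
if err != nil {
log.Fatal(err)
}
defer images.Cleanup(ctx, &image.CleanupRequest{Path: "/run/prep/redis-01"})
for _, m := range resp.Layers {
fmt.Printf("mount -t %s %s %s -o %s\n", m.Type, m.Source, m.Target, strings.Join(m.Options, ","))
}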

@@ -1,18 +0,0 @@
package pprof
import (
// expvar init routine adds the "/debug/vars" handler
_ "expvar"
"net/http"
"net/http/pprof"
)
// New returns a new handler serving pprof information
func New() http.Handler {
mux := http.NewServeMux()
mux.Handle("/pprof/block", pprof.Handler("block"))
mux.Handle("/pprof/heap", pprof.Handler("heap"))
mux.Handle("/pprof/goroutine", pprof.Handler("goroutine"))
mux.Handle("/pprof/threadcreate", pprof.Handler("threadcreate"))
return mux
}
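
A minimal way to expose the handler, assuming it is mounted at the server root so the /pprof/* routes registered above resolve as-is (the address is invented):

// Serves /pprof/heap, /pprof/block, /pprof/goroutine and /pprof/threadcreate.
go http.ListenAndServe("127.0.0.1:6060", pprof.New())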

@@ -1,5 +0,0 @@
package mount
//go:generate protoc -I .:../../..:$GOPATH/src --gogo_out=plugins=grpc,import_path=github.com/docker/containerkit/api/types/mount:. mount.proto
//+++go:generate protoc -I .:../../..:$GOPATH/src --gogo_out=plugins=grpc,import_path=github.com/docker/containerkit/api/types/mount,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. mount.proto

@@ -1,71 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: mount.proto
// DO NOT EDIT!
/*
Package mount is a generated protocol buffer package.
It is generated from these files:
mount.proto
It has these top-level messages:
Mount
*/
package mount
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerKit. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
type Mount struct {
// Type defines the nature of the mount.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Target path in container
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// Options specifies zero or more fstab style mount options.
Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (m *Mount) String() string { return proto.CompactTextString(m) }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
func init() {
proto.RegisterType((*Mount)(nil), "docker.containerkit.types.Mount")
}
var fileDescriptorMount = []byte{
// 163 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8d, 0xb1, 0x0e, 0x82, 0x30,
0x10, 0x86, 0x83, 0x20, 0x86, 0xba, 0x75, 0x30, 0xd5, 0x89, 0x38, 0xb1, 0x58, 0x06, 0x9f, 0xc3,
0x85, 0x37, 0x80, 0x7a, 0xd6, 0x86, 0xd0, 0x23, 0xe5, 0x3a, 0xf8, 0xf6, 0xa6, 0x57, 0xdd, 0xfe,
0xef, 0x4b, 0xee, 0x3b, 0x71, 0x5c, 0x30, 0x7a, 0xd2, 0x6b, 0x40, 0x42, 0x79, 0x7e, 0xa2, 0x99,
0x21, 0x68, 0x83, 0x9e, 0x46, 0xe7, 0x21, 0xcc, 0x8e, 0x34, 0x7d, 0x56, 0xd8, 0x2e, 0x37, 0xeb,
0xe8, 0x1d, 0x27, 0x6d, 0x70, 0xe9, 0x2d, 0x5a, 0xec, 0xf9, 0x62, 0x8a, 0x2f, 0x26, 0x06, 0x5e,
0xb9, 0x74, 0x05, 0xb1, 0x7f, 0xa4, 0xb0, 0x94, 0xa2, 0x4a, 0x01, 0x55, 0xb4, 0x45, 0xd7, 0x0c,
0xbc, 0xe5, 0x49, 0xd4, 0x1b, 0xc6, 0x60, 0x40, 0xed, 0xd8, 0xfe, 0x28, 0x79, 0x1a, 0x83, 0x05,
0x52, 0x65, 0xf6, 0x99, 0xa4, 0x12, 0x07, 0x5c, 0xc9, 0xa1, 0xdf, 0x54, 0xd5, 0x96, 0x5d, 0x33,
0xfc, 0x71, 0xaa, 0xf9, 0xdb, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x20, 0x78, 0xd5, 0x59, 0xc6,
0x00, 0x00, 0x00,
}

@@ -1,27 +0,0 @@
syntax = "proto3";
package docker.containerkit.types;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerKit. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
// Type defines the nature of the mount.
string type = 1;
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
string source = 2;
// Target path in container
string target = 3;
// Options specifies zero or more fstab style mount options.
repeated string options = 4;
}
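
As a concrete instance of these fields, a read-only bind mount would look like this in the generated Go type (paths invented):

m := &mount.Mount{
Type:    "bind",
Source:  "/var/lib/volumes/data", // host path for a bind mount
Target:  "/data",                 // path inside the container
Options: []string{"rbind", "ro"}, // fstab-style options
}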

@@ -1,2 +0,0 @@
// Package types provides several types common to grpc services.
package types

@@ -1,161 +0,0 @@
package main
import (
"fmt"
"net/http"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/containerd/api/pprof"
"github.com/docker/containerd/supervisor"
)
const (
usage = `High performance container daemon`
minRlimit = 1024
defaultStateDir = "/run/containerd"
defaultGRPCEndpoint = "unix:///run/containerd/containerd.sock"
)
var daemonFlags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output in the logs",
},
cli.StringFlag{
Name: "state-dir",
Value: defaultStateDir,
Usage: "runtime state directory",
},
cli.StringFlag{
Name: "listen,l",
Value: defaultGRPCEndpoint,
Usage: "proto://address on which the GRPC API will listen",
},
cli.StringFlag{
Name: "runtime,r",
Value: "runc",
Usage: "name or path of the OCI compliant runtime to use when executing containers",
},
cli.StringSliceFlag{
Name: "runtime-args",
Value: &cli.StringSlice{},
Usage: "specify additional runtime args",
},
cli.StringFlag{
Name: "shim",
Value: "containerd-shim",
Usage: "Name or path of shim",
},
cli.StringFlag{
Name: "pprof-address",
Usage: "http address to listen for pprof events",
},
cli.DurationFlag{
Name: "start-timeout",
Value: 15 * time.Second,
Usage: "timeout duration for waiting on a container to start before it is killed",
},
cli.IntFlag{
Name: "retain-count",
Value: 500,
Usage: "number of past events to keep in the event log",
},
}
func main() {
logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: time.RFC3339Nano})
app := cli.NewApp()
app.Name = "containerd"
app.Version = getVersion()
app.Usage = usage
app.Flags = daemonFlags
app.Before = func(context *cli.Context) error {
if context.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
if p := context.GlobalString("pprof-address"); len(p) > 0 {
h := pprof.New()
http.Handle("/debug", h)
go http.ListenAndServe(p, nil)
}
if err := checkLimits(); err != nil {
return err
}
return nil
}
app.Action = func(context *cli.Context) {
if err := daemon(context); err != nil {
logrus.Fatal(err)
}
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
func daemon(context *cli.Context) error {
signals := make(chan os.Signal, 2048)
signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1)
sv, err := supervisor.New(supervisor.Config{
StateDir: context.String("state-dir"),
Runtime: context.String("runtime"),
ShimName: context.String("shim"),
RuntimeArgs: context.StringSlice("runtime-args"),
Timeout: context.Duration("start-timeout"),
EventRetainCount: context.Int("retain-count"),
})
if err != nil {
return err
}
wg := &sync.WaitGroup{}
for i := 0; i < 10; i++ {
wg.Add(1)
w := supervisor.NewWorker(sv, wg)
go w.Start()
}
if err := sv.Start(); err != nil {
return err
}
// Split the listen string of the form proto://addr
var (
listenSpec = context.String("listen")
listenParts = strings.SplitN(listenSpec, "://", 2)
)
if len(listenParts) != 2 {
return fmt.Errorf("bad listen address format %s, expected proto://address", listenSpec)
}
server, err := startServer(listenParts[0], listenParts[1], sv)
if err != nil {
return err
}
for s := range signals {
switch s {
case syscall.SIGUSR1:
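// Dump all goroutine stacks: keep doubling the buffer until
// runtime.Stack no longer fills it, so the dump is never truncated.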
var (
buf []byte
stackSize int
)
bufferLen := 16384
for stackSize == len(buf) {
buf = make([]byte, bufferLen)
stackSize = runtime.Stack(buf, true)
bufferLen *= 2
}
buf = buf[:stackSize]
logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
case syscall.SIGINT, syscall.SIGTERM:
logrus.Infof("stopping containerd after receiving %s", s)
server.Stop()
os.Exit(0)
}
}
return nil
}

@@ -1,71 +0,0 @@
package main
import (
"fmt"
"os"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd"
"github.com/docker/containerd/api/grpc/server"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/containerd/supervisor"
"github.com/docker/docker/pkg/listeners"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
)
func startServer(protocol, address string, sv *supervisor.Supervisor) (*grpc.Server, error) {
sockets, err := listeners.Init(protocol, address, "", nil)
if err != nil {
return nil, err
}
if len(sockets) != 1 {
return nil, fmt.Errorf("incorrect number of listeners")
}
l := sockets[0]
s := grpc.NewServer()
types.RegisterAPIServer(s, server.NewServer(sv))
healthServer := health.NewServer()
grpc_health_v1.RegisterHealthServer(s, healthServer)
go func() {
logrus.Debugf("containerd: grpc api on %s", address)
if err := s.Serve(l); err != nil {
logrus.WithField("error", err).Fatal("containerd: serve grpc")
}
}()
return s, nil
}
// getDefaultID returns the hostname for the instance host
func getDefaultID() string {
hostname, err := os.Hostname()
if err != nil {
panic(err)
}
return hostname
}
func checkLimits() error {
var l syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil {
return err
}
if l.Cur <= minRlimit {
logrus.WithFields(logrus.Fields{
"current": l.Cur,
"max": l.Max,
}).Warn("containerd: low RLIMIT_NOFILE changing to max")
l.Cur = l.Max
return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
}
return nil
}
func getVersion() string {
if containerd.GitCommit != "" {
return fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit)
}
return containerd.Version
}

View file

@@ -1,2 +0,0 @@
all:
go build

View file

@@ -1,165 +0,0 @@
package main
import (
"fmt"
"os"
"text/tabwriter"
"github.com/codegangsta/cli"
"github.com/docker/containerd/api/grpc/types"
netcontext "golang.org/x/net/context"
)
var checkpointSubCmds = []cli.Command{
listCheckpointCommand,
createCheckpointCommand,
deleteCheckpointCommand,
}
var checkpointCommand = cli.Command{
Name: "checkpoints",
Usage: "list all checkpoints",
ArgsUsage: "COMMAND [arguments...]",
Subcommands: checkpointSubCmds,
Description: func() string {
desc := "\n COMMAND:\n"
for _, command := range checkpointSubCmds {
desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage)
}
return desc
}(),
Action: listCheckpoints,
}
var listCheckpointCommand = cli.Command{
Name: "list",
Usage: "list all checkpoints for a container",
Action: listCheckpoints,
Flags: []cli.Flag{
cli.StringFlag{
Name: "checkpoint-dir",
Value: "",
Usage: "path to checkpoint directory",
},
},
}
func listCheckpoints(context *cli.Context) {
var (
c = getClient(context)
id = context.Args().First()
)
if id == "" {
fatal("container id cannot be empty", ExitStatusMissingArg)
}
resp, err := c.ListCheckpoint(netcontext.Background(), &types.ListCheckpointRequest{
Id: id,
CheckpointDir: context.String("checkpoint-dir"),
})
if err != nil {
fatal(err.Error(), 1)
}
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
fmt.Fprint(w, "NAME\tTCP\tUNIX SOCKETS\tSHELL\n")
for _, c := range resp.Checkpoints {
fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", c.Name, c.Tcp, c.UnixSockets, c.Shell)
}
if err := w.Flush(); err != nil {
fatal(err.Error(), 1)
}
}
var createCheckpointCommand = cli.Command{
Name: "create",
Usage: "create a new checkpoint for the container",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "tcp",
Usage: "persist open tcp connections",
},
cli.BoolFlag{
Name: "unix-sockets",
Usage: "persist unix sockets",
},
cli.BoolFlag{
Name: "exit",
Usage: "exit the container after the checkpoint completes successfully",
},
cli.BoolFlag{
Name: "shell",
Usage: "checkpoint shell jobs",
},
cli.StringFlag{
Name: "checkpoint-dir",
Value: "",
Usage: "directory to store checkpoints",
},
cli.StringSliceFlag{
Name: "empty-ns",
Usage: "create a namespace, but don't restore its properties",
},
},
Action: func(context *cli.Context) {
var (
containerID = context.Args().Get(0)
name = context.Args().Get(1)
)
if containerID == "" {
fatal("container id at cannot be empty", ExitStatusMissingArg)
}
if name == "" {
fatal("checkpoint name cannot be empty", ExitStatusMissingArg)
}
c := getClient(context)
checkpoint := types.Checkpoint{
Name: name,
Exit: context.Bool("exit"),
Tcp: context.Bool("tcp"),
Shell: context.Bool("shell"),
UnixSockets: context.Bool("unix-sockets"),
}
emptyNSes := context.StringSlice("empty-ns")
checkpoint.EmptyNS = append(checkpoint.EmptyNS, emptyNSes...)
if _, err := c.CreateCheckpoint(netcontext.Background(), &types.CreateCheckpointRequest{
Id: containerID,
CheckpointDir: context.String("checkpoint-dir"),
Checkpoint: &checkpoint,
}); err != nil {
fatal(err.Error(), 1)
}
},
}
var deleteCheckpointCommand = cli.Command{
Name: "delete",
Usage: "delete a container's checkpoint",
Flags: []cli.Flag{
cli.StringFlag{
Name: "checkpoint-dir",
Value: "",
Usage: "path to checkpoint directory",
},
},
Action: func(context *cli.Context) {
var (
containerID = context.Args().Get(0)
name = context.Args().Get(1)
)
if containerID == "" {
fatal("container id at cannot be empty", ExitStatusMissingArg)
}
if name == "" {
fatal("checkpoint name cannot be empty", ExitStatusMissingArg)
}
c := getClient(context)
if _, err := c.DeleteCheckpoint(netcontext.Background(), &types.DeleteCheckpointRequest{
Id: containerID,
Name: name,
CheckpointDir: context.String("checkpoint-dir"),
}); err != nil {
fatal(err.Error(), 1)
}
},
}

View file

@@ -1,10 +0,0 @@
package main
// ctr wide constants
const (
// ExitStatusOK indicates successful completion
ExitStatusOK = 0
// ExitStatusMissingArg indicates failure due to missing argument(s)
ExitStatusMissingArg = 1
)

View file

@@ -1,684 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"time"
"github.com/codegangsta/cli"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/containerd/specs"
"github.com/docker/docker/pkg/term"
"github.com/golang/protobuf/ptypes"
netcontext "golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/transport"
)
// TODO: parse flags and pass opts
func getClient(ctx *cli.Context) types.APIClient {
// Parse proto://address form addresses.
bindSpec := ctx.GlobalString("address")
bindParts := strings.SplitN(bindSpec, "://", 2)
if len(bindParts) != 2 {
fatal(fmt.Sprintf("bad bind address format %s, expected proto://address", bindSpec), 1)
}
// reset the logger for grpc to log to /dev/null so that it does not mess with our stdio
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(ctx.GlobalDuration("conn-timeout"))}
dialOpts = append(dialOpts,
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(bindParts[0], bindParts[1], timeout)
},
))
conn, err := grpc.Dial(bindSpec, dialOpts...)
if err != nil {
fatal(err.Error(), 1)
}
return types.NewAPIClient(conn)
}
var contSubCmds = []cli.Command{
execCommand,
killCommand,
listCommand,
pauseCommand,
resumeCommand,
startCommand,
stateCommand,
statsCommand,
watchCommand,
updateCommand,
}
var containersCommand = cli.Command{
Name: "containers",
Usage: "interact with running containers",
ArgsUsage: "COMMAND [arguments...]",
Subcommands: contSubCmds,
Description: func() string {
desc := "\n COMMAND:\n"
for _, command := range contSubCmds {
desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage)
}
return desc
}(),
Action: listContainers,
}
var stateCommand = cli.Command{
Name: "state",
Usage: "get a raw dump of the containerd state",
Action: func(context *cli.Context) {
c := getClient(context)
resp, err := c.State(netcontext.Background(), &types.StateRequest{
Id: context.Args().First(),
})
if err != nil {
fatal(err.Error(), 1)
}
data, err := json.Marshal(resp)
if err != nil {
fatal(err.Error(), 1)
}
fmt.Print(string(data))
},
}
var listCommand = cli.Command{
Name: "list",
Usage: "list all running containers",
Action: listContainers,
}
func listContainers(context *cli.Context) {
c := getClient(context)
resp, err := c.State(netcontext.Background(), &types.StateRequest{
Id: context.Args().First(),
})
if err != nil {
fatal(err.Error(), 1)
}
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
fmt.Fprint(w, "ID\tPATH\tSTATUS\tPROCESSES\n")
sortContainers(resp.Containers)
for _, c := range resp.Containers {
procs := []string{}
for _, p := range c.Processes {
procs = append(procs, p.Pid)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Id, c.BundlePath, c.Status, strings.Join(procs, ","))
}
if err := w.Flush(); err != nil {
fatal(err.Error(), 1)
}
}
var startCommand = cli.Command{
Name: "start",
Usage: "start a container",
ArgsUsage: "ID BundlePath",
Flags: []cli.Flag{
cli.StringFlag{
Name: "checkpoint,c",
Value: "",
Usage: "checkpoint to start the container from",
},
cli.StringFlag{
Name: "checkpoint-dir",
Value: "",
Usage: "path to checkpoint directory",
},
cli.BoolFlag{
Name: "attach,a",
Usage: "connect to the stdio of the container",
},
cli.StringSliceFlag{
Name: "label,l",
Value: &cli.StringSlice{},
Usage: "set labels for the container",
},
cli.BoolFlag{
Name: "no-pivot",
Usage: "do not use pivot root",
},
cli.StringFlag{
Name: "runtime,r",
Value: "runc",
Usage: "name or path of the OCI compliant runtime to use when executing containers",
},
cli.StringSliceFlag{
Name: "runtime-args",
Value: &cli.StringSlice{},
Usage: "specify additional runtime args",
},
},
Action: func(context *cli.Context) {
var (
id = context.Args().Get(0)
path = context.Args().Get(1)
)
if path == "" {
fatal("bundle path cannot be empty", ExitStatusMissingArg)
}
if id == "" {
fatal("container id cannot be empty", ExitStatusMissingArg)
}
bpath, err := filepath.Abs(path)
if err != nil {
fatal(fmt.Sprintf("cannot get the absolute path of the bundle: %v", err), 1)
}
s, err := createStdio()
defer func() {
if s.stdin != "" {
os.RemoveAll(filepath.Dir(s.stdin))
}
}()
if err != nil {
fatal(err.Error(), 1)
}
var (
restoreAndCloseStdin func()
tty bool
c = getClient(context)
r = &types.CreateContainerRequest{
Id: id,
BundlePath: bpath,
Checkpoint: context.String("checkpoint"),
CheckpointDir: context.String("checkpoint-dir"),
Stdin: s.stdin,
Stdout: s.stdout,
Stderr: s.stderr,
Labels: context.StringSlice("label"),
NoPivotRoot: context.Bool("no-pivot"),
Runtime: context.String("runtime"),
RuntimeArgs: context.StringSlice("runtime-args"),
}
)
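// restoreAndCloseStdin returns the terminal to its original state and closes
// the stdin fifo once the container is done with it.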
restoreAndCloseStdin = func() {
if state != nil {
term.RestoreTerminal(os.Stdin.Fd(), state)
}
if stdin != nil {
stdin.Close()
}
}
defer restoreAndCloseStdin()
if context.Bool("attach") {
mkterm, err := readTermSetting(bpath)
if err != nil {
fatal(err.Error(), 1)
}
tty = mkterm
if mkterm {
s, err := term.SetRawTerminal(os.Stdin.Fd())
if err != nil {
fatal(err.Error(), 1)
}
state = s
}
if err := attachStdio(s); err != nil {
fatal(err.Error(), 1)
}
}
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
if err != nil {
fatal(err.Error(), 1)
}
if _, err := c.CreateContainer(netcontext.Background(), r); err != nil {
fatal(err.Error(), 1)
}
if context.Bool("attach") {
go func() {
io.Copy(stdin, os.Stdin)
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
Id: id,
Pid: "init",
CloseStdin: true,
}); err != nil {
fatal(err.Error(), 1)
}
restoreAndCloseStdin()
}()
if tty {
resize(id, "init", c)
go func() {
s := make(chan os.Signal, 64)
signal.Notify(s, syscall.SIGWINCH)
for range s {
if err := resize(id, "init", c); err != nil {
log.Println(err)
}
}
}()
}
waitForExit(c, events, id, "init", restoreAndCloseStdin)
}
},
}
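// resize pushes the current terminal window size to the given container process.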
func resize(id, pid string, c types.APIClient) error {
ws, err := term.GetWinsize(os.Stdin.Fd())
if err != nil {
return err
}
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
Id: id,
Pid: pid,
Width: uint32(ws.Width),
Height: uint32(ws.Height),
}); err != nil {
return err
}
return nil
}
var (
stdin io.WriteCloser
state *term.State
)
// readTermSetting reads the Terminal option out of the specs configuration
// to know if ctr should allocate a pty
func readTermSetting(path string) (bool, error) {
f, err := os.Open(filepath.Join(path, "config.json"))
if err != nil {
return false, err
}
defer f.Close()
var spec specs.Spec
if err := json.NewDecoder(f).Decode(&spec); err != nil {
return false, err
}
return spec.Process.Terminal, nil
}
func attachStdio(s stdio) error {
stdinf, err := os.OpenFile(s.stdin, syscall.O_RDWR, 0)
if err != nil {
return err
}
// FIXME: assign to global
stdin = stdinf
stdoutf, err := os.OpenFile(s.stdout, syscall.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(os.Stdout, stdoutf)
stderrf, err := os.OpenFile(s.stderr, syscall.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(os.Stderr, stderrf)
return nil
}
var watchCommand = cli.Command{
Name: "watch",
Usage: "print container events",
Action: func(context *cli.Context) {
c := getClient(context)
id := context.Args().First()
if id != "" {
resp, err := c.State(netcontext.Background(), &types.StateRequest{Id: id})
if err != nil {
fatal(err.Error(), 1)
}
found := false
for _, c := range resp.Containers {
if c.Id == id {
found = true
break
}
}
if !found {
fatal("Invalid container id", 1)
}
}
events, reqErr := c.Events(netcontext.Background(), &types.EventsRequest{})
if reqErr != nil {
fatal(reqErr.Error(), 1)
}
for {
e, err := events.Recv()
if err != nil {
fatal(err.Error(), 1)
}
if id == "" || e.Id == id {
fmt.Printf("%#v\n", e)
}
}
},
}
var pauseCommand = cli.Command{
Name: "pause",
Usage: "pause a container",
Action: func(context *cli.Context) {
id := context.Args().First()
if id == "" {
fatal("container id cannot be empty", ExitStatusMissingArg)
}
c := getClient(context)
_, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{
Id: id,
Pid: "init",
Status: "paused",
})
if err != nil {
fatal(err.Error(), 1)
}
},
}
var resumeCommand = cli.Command{
Name: "resume",
Usage: "resume a paused container",
Action: func(context *cli.Context) {
id := context.Args().First()
if id == "" {
fatal("container id cannot be empty", ExitStatusMissingArg)
}
c := getClient(context)
_, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{
Id: id,
Pid: "init",
Status: "running",
})
if err != nil {
fatal(err.Error(), 1)
}
},
}
var killCommand = cli.Command{
Name: "kill",
Usage: "send a signal to a container or its processes",
Flags: []cli.Flag{
cli.StringFlag{
Name: "pid,p",
Value: "init",
Usage: "pid of the process to signal within the container",
},
cli.IntFlag{
Name: "signal,s",
Value: 15,
Usage: "signal to send to the container",
},
},
Action: func(context *cli.Context) {
id := context.Args().First()
if id == "" {
fatal("container id cannot be empty", ExitStatusMissingArg)
}
c := getClient(context)
if _, err := c.Signal(netcontext.Background(), &types.SignalRequest{
Id: id,
Pid: context.String("pid"),
Signal: uint32(context.Int("signal")),
}); err != nil {
fatal(err.Error(), 1)
}
},
}
var execCommand = cli.Command{
Name: "exec",
Usage: "exec another process in an existing container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Usage: "container id to add the process to",
},
cli.StringFlag{
Name: "pid",
Usage: "process id for the new process",
},
cli.BoolFlag{
Name: "attach,a",
Usage: "connect to the stdio of the container",
},
cli.StringFlag{
Name: "cwd",
Usage: "current working directory for the process",
},
cli.BoolFlag{
Name: "tty,t",
Usage: "create a terminal for the process",
},
cli.StringSliceFlag{
Name: "env,e",
Value: &cli.StringSlice{},
Usage: "environment variables for the process",
},
cli.IntFlag{
Name: "uid,u",
Usage: "user id of the user for the process",
},
cli.IntFlag{
Name: "gid,g",
Usage: "group id of the user for the process",
},
},
Action: func(context *cli.Context) {
var restoreAndCloseStdin func()
p := &types.AddProcessRequest{
Id: context.String("id"),
Pid: context.String("pid"),
Args: context.Args(),
Cwd: context.String("cwd"),
Terminal: context.Bool("tty"),
Env: context.StringSlice("env"),
User: &types.User{
Uid: uint32(context.Int("uid")),
Gid: uint32(context.Int("gid")),
},
}
s, err := createStdio()
defer func() {
if s.stdin != "" {
os.RemoveAll(filepath.Dir(s.stdin))
}
}()
if err != nil {
fatal(err.Error(), 1)
}
p.Stdin = s.stdin
p.Stdout = s.stdout
p.Stderr = s.stderr
restoreAndCloseStdin = func() {
if state != nil {
term.RestoreTerminal(os.Stdin.Fd(), state)
}
if stdin != nil {
stdin.Close()
}
}
defer restoreAndCloseStdin()
if context.Bool("attach") {
if context.Bool("tty") {
s, err := term.SetRawTerminal(os.Stdin.Fd())
if err != nil {
fatal(err.Error(), 1)
}
state = s
}
if err := attachStdio(s); err != nil {
fatal(err.Error(), 1)
}
}
c := getClient(context)
events, err := c.Events(netcontext.Background(), &types.EventsRequest{})
if err != nil {
fatal(err.Error(), 1)
}
if _, err := c.AddProcess(netcontext.Background(), p); err != nil {
fatal(err.Error(), 1)
}
if context.Bool("attach") {
go func() {
io.Copy(stdin, os.Stdin)
if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{
Id: p.Id,
Pid: p.Pid,
CloseStdin: true,
}); err != nil {
log.Println(err)
}
restoreAndCloseStdin()
}()
if context.Bool("tty") {
resize(p.Id, p.Pid, c)
go func() {
s := make(chan os.Signal, 64)
signal.Notify(s, syscall.SIGWINCH)
for range s {
if err := resize(p.Id, p.Pid, c); err != nil {
log.Println(err)
}
}
}()
}
waitForExit(c, events, context.String("id"), context.String("pid"), restoreAndCloseStdin)
}
},
}
var statsCommand = cli.Command{
Name: "stats",
Usage: "get stats for running container",
Action: func(context *cli.Context) {
req := &types.StatsRequest{
Id: context.Args().First(),
}
c := getClient(context)
stats, err := c.Stats(netcontext.Background(), req)
if err != nil {
fatal(err.Error(), 1)
}
data, err := json.Marshal(stats)
if err != nil {
fatal(err.Error(), 1)
}
fmt.Print(string(data))
},
}
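// getUpdateCommandInt64Flag parses the named string flag as a uint64,
// exiting via fatal on malformed input.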
func getUpdateCommandInt64Flag(context *cli.Context, name string) uint64 {
str := context.String(name)
if str == "" {
return 0
}
val, err := strconv.ParseUint(str, 0, 64)
if err != nil {
fatal(err.Error(), 1)
}
return val
}
var updateCommand = cli.Command{
Name: "update",
Usage: "update a containers resources",
Flags: []cli.Flag{
cli.StringFlag{
Name: "memory-limit",
},
cli.StringFlag{
Name: "memory-reservation",
},
cli.StringFlag{
Name: "memory-swap",
},
cli.StringFlag{
Name: "cpu-quota",
},
cli.StringFlag{
Name: "cpu-period",
},
cli.StringFlag{
Name: "cpu-shares",
},
cli.StringFlag{
Name: "kernel-limit",
},
cli.StringFlag{
Name: "kernel-tcp-limit",
},
cli.StringFlag{
Name: "blkio-weight",
},
cli.StringFlag{
Name: "cpuset-cpus",
},
cli.StringFlag{
Name: "cpuset-mems",
},
},
Action: func(context *cli.Context) {
req := &types.UpdateContainerRequest{
Id: context.Args().First(),
}
req.Resources = &types.UpdateResource{}
req.Resources.MemoryLimit = getUpdateCommandInt64Flag(context, "memory-limit")
req.Resources.MemoryReservation = getUpdateCommandInt64Flag(context, "memory-reservation")
req.Resources.MemorySwap = getUpdateCommandInt64Flag(context, "memory-swap")
req.Resources.BlkioWeight = getUpdateCommandInt64Flag(context, "blkio-weight")
req.Resources.CpuPeriod = getUpdateCommandInt64Flag(context, "cpu-period")
req.Resources.CpuQuota = getUpdateCommandInt64Flag(context, "cpu-quota")
req.Resources.CpuShares = getUpdateCommandInt64Flag(context, "cpu-shares")
req.Resources.CpusetCpus = context.String("cpuset-cpus")
req.Resources.CpusetMems = context.String("cpuset-mems")
req.Resources.KernelMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-limit")
req.Resources.KernelTCPMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-tcp-limit")
c := getClient(context)
if _, err := c.UpdateContainer(netcontext.Background(), req); err != nil {
fatal(err.Error(), 1)
}
},
}
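// waitForExit consumes the event stream until it sees an exit event for the
// given container and process, then invokes closer and exits with the
// process's status.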
func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, closer func()) {
timestamp := time.Now()
for {
e, err := events.Recv()
if err != nil {
if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc {
closer()
os.Exit(128 + int(syscall.SIGHUP))
}
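// The stream failed for another reason; back off and resubscribe from the
// last event we processed.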
time.Sleep(1 * time.Second)
tsp, err := ptypes.TimestampProto(timestamp)
if err != nil {
closer()
fmt.Fprintf(os.Stderr, "%s", err.Error())
os.Exit(1)
}
events, _ = c.Events(netcontext.Background(), &types.EventsRequest{Timestamp: tsp})
continue
}
timestamp, err = ptypes.Timestamp(e.Timestamp)
if e.Id == id && e.Type == "exit" && e.Pid == pid {
closer()
os.Exit(int(e.Status))
}
}
}
type stdio struct {
stdin string
stdout string
stderr string
}

View file

@@ -1,28 +0,0 @@
package main
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
)
func createStdio() (s stdio, err error) {
tmp, err := ioutil.TempDir("", "ctr-")
if err != nil {
return s, err
}
// create fifo's for the process
for name, fd := range map[string]*string{
"stdin": &s.stdin,
"stdout": &s.stdout,
"stderr": &s.stderr,
} {
path := filepath.Join(tmp, name)
if err := syscall.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
return s, err
}
*fd = path
}
return s, nil
}

View file

@@ -1,9 +0,0 @@
package main
import (
"errors"
)
func createStdio() (s stdio, err error) {
return s, errors.New("createStdio not implemented on Solaris")
}

View file

@@ -1,63 +0,0 @@
package main
import (
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/codegangsta/cli"
"github.com/docker/containerd/api/grpc/types"
"github.com/golang/protobuf/ptypes"
netcontext "golang.org/x/net/context"
)
var eventsCommand = cli.Command{
Name: "events",
Usage: "receive events from the containerd daemon",
Flags: []cli.Flag{
cli.StringFlag{
Name: "timestamp,t",
Usage: "get events from a specific time stamp in RFC3339Nano format",
},
},
Action: func(context *cli.Context) {
var (
t = time.Time{}
c = getClient(context)
)
if ts := context.String("timestamp"); ts != "" {
from, err := time.Parse(time.RFC3339Nano, ts)
if err != nil {
fatal(err.Error(), 1)
}
t = from
}
tsp, err := ptypes.TimestampProto(t)
if err != nil {
fatal(err.Error(), 1)
}
events, err := c.Events(netcontext.Background(), &types.EventsRequest{
Timestamp: tsp,
})
if err != nil {
fatal(err.Error(), 1)
}
w := tabwriter.NewWriter(os.Stdout, 31, 1, 1, ' ', 0)
fmt.Fprint(w, "TIME\tTYPE\tID\tPID\tSTATUS\n")
w.Flush()
for {
e, err := events.Recv()
if err != nil {
fatal(err.Error(), 1)
}
t, err := ptypes.Timestamp(e.Timestamp)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to convert timestamp")
t = time.Time{}
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\n", t.Format(time.RFC3339Nano), e.Type, e.Id, e.Pid, e.Status)
w.Flush()
}
},
}

View file

@@ -1,90 +0,0 @@
package main
import (
"fmt"
"os"
"time"
netcontext "golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/containerd"
"github.com/docker/containerd/api/grpc/types"
)
const usage = `High performance container daemon cli`
type exit struct {
Code int
}
func main() {
// We want our defer functions to be run when calling fatal()
defer func() {
if e := recover(); e != nil {
if ex, ok := e.(exit); ok {
os.Exit(ex.Code)
}
panic(e)
}
}()
app := cli.NewApp()
app.Name = "ctr"
if containerd.GitCommit != "" {
app.Version = fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit)
} else {
app.Version = containerd.Version
}
app.Usage = usage
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output in the logs",
},
cli.StringFlag{
Name: "address",
Value: "unix:///run/containerd/containerd.sock",
Usage: "proto://address of GRPC API",
},
cli.DurationFlag{
Name: "conn-timeout",
Value: 1 * time.Second,
Usage: "GRPC connection timeout",
},
}
app.Commands = []cli.Command{
checkpointCommand,
containersCommand,
eventsCommand,
stateCommand,
versionCommand,
}
app.Before = func(context *cli.Context) error {
if context.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
return nil
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
var versionCommand = cli.Command{
Name: "version",
Usage: "return the daemon version",
Action: func(context *cli.Context) {
c := getClient(context)
resp, err := c.GetServerVersion(netcontext.Background(), &types.GetServerVersionRequest{})
if err != nil {
fatal(err.Error(), 1)
}
fmt.Printf("daemon version %d.%d.%d commit: %s\n", resp.Major, resp.Minor, resp.Patch, resp.Revision)
},
}
func fatal(err string, code int) {
fmt.Fprintf(os.Stderr, "[ctr] %s\n", err)
panic(exit{code})
}

View file

@@ -1,27 +0,0 @@
package main
import (
"sort"
"github.com/docker/containerd/api/grpc/types"
)
func sortContainers(c []*types.Container) {
sort.Sort(&containerSorter{c})
}
type containerSorter struct {
c []*types.Container
}
func (s *containerSorter) Len() int {
return len(s.c)
}
func (s *containerSorter) Swap(i, j int) {
s.c[i], s.c[j] = s.c[j], s.c[i]
}
func (s *containerSorter) Less(i, j int) bool {
return s.c[i].Id < s.c[j].Id
}

View file

@@ -1,6 +0,0 @@
# API
The containerd API is served over GRPC on a unix socket, located by default at `/run/containerd/containerd.sock`.
At this time please refer to the [api.proto](https://github.com/docker/containerd/blob/master/api/grpc/types/api.proto) for the API methods and types.
There is a Go implementation and types checked into this repository, but alternate language implementations can be created using the grpc and protoc toolchain.
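As a rough illustration, a minimal Go client built on the generated types in this repository (a sketch, not a supported client library) can dial the socket and dump the daemon's state:
```go
package main

import (
    "fmt"
    "net"
    "time"

    "github.com/docker/containerd/api/grpc/types"
    netcontext "golang.org/x/net/context"
    "google.golang.org/grpc"
)

func main() {
    // Dial the containerd GRPC API over its unix socket.
    conn, err := grpc.Dial("/run/containerd/containerd.sock",
        grpc.WithInsecure(),
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout)
        }))
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    // Ask the daemon for a dump of its current state.
    resp, err := types.NewAPIClient(conn).State(netcontext.Background(), &types.StateRequest{})
    if err != nil {
        panic(err)
    }
    for _, c := range resp.Containers {
        fmt.Println(c.Id, c.Status)
    }
}
```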

View file

@@ -1,36 +0,0 @@
# Attaching to STDIO or TTY
The model for STDIO, TTY, and logging is a little different in containerd.
Because consumers want a variety of approaches on the logging side, these decisions
are pushed to the client.
The containerd API is designed for access on a single host, so host-specific values such as filesystem paths are acceptable in the API.
For the STDIO model, the client requesting to start a container provides the paths for the IO.
## Logging
If no options are specified on create, all STDIO of the processes launched by containerd will be sent to `/dev/null`.
If you want containerd to send the STDIO of the processes to files, pass the file paths in the stdin, stdout, and stderr fields of the create container method defined by this proto:
```proto
message CreateContainerRequest {
string id = 1; // ID of container
string bundlePath = 2; // path to OCI bundle
string stdin = 3; // path to the file where stdin will be read (optional)
string stdout = 4; // path to file where stdout will be written (optional)
string stderr = 5; // path to file where stderr will be written (optional)
string console = 6; // path to the console for a container (optional)
string checkpoint = 7; // checkpoint name if you want to create immediate checkpoint (optional)
}
```
## Attach
To get attach-like functionality for your containers, you use the same API request but provide named pipes or fifos as the IO paths.
The default CLI for containerd does this if you specify the `--attach` flag on `create` or `start`.
It will create fifos for each of the container's stdio streams, which the CLI can read and write to.
This can be used to create an interactive session with the container, `bash` for example, or to have a blocking way to collect the container's STDIO and forward it to your logging facilities.
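As a sketch of that flow in Go (assuming a generated `types.APIClient` like the one in the API doc, an existing bundle at `/containers/redis`, and imports of `io`, `io/ioutil`, `os`, `path/filepath`, `syscall`, the `types` package, and `golang.org/x/net/context`; the `attach` helper is illustrative, not part of containerd):
```go
// attach creates fifos for the container's stdio, starts the container with
// them, and forwards the container's stdout to our terminal.
func attach(c types.APIClient) error {
    dir, err := ioutil.TempDir("", "ctr-")
    if err != nil {
        return err
    }
    paths := map[string]string{}
    for _, name := range []string{"stdin", "stdout", "stderr"} {
        p := filepath.Join(dir, name)
        // An already-existing fifo is fine to reuse.
        if err := syscall.Mkfifo(p, 0755); err != nil && !os.IsExist(err) {
            return err
        }
        paths[name] = p
    }
    if _, err := c.CreateContainer(netcontext.Background(), &types.CreateContainerRequest{
        Id:         "redis",
        BundlePath: "/containers/redis",
        Stdin:      paths["stdin"],
        Stdout:     paths["stdout"],
        Stderr:     paths["stderr"],
    }); err != nil {
        return err
    }
    stdout, err := os.OpenFile(paths["stdout"], os.O_RDWR, 0)
    if err != nil {
        return err
    }
    go io.Copy(os.Stdout, stdout) // stream container output to the terminal
    return nil
}
```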
## TTY
The tty model is the same as above, except that the client creates a pty and provides the other side to containerd in the `console` field of the create request.
Containerd will provide the pty to the container to use and the session can be opened with the container after it starts.

View file

@@ -1,12 +0,0 @@
# containerd changes to the bundle
By default, containerd makes changes to the container's bundle by adding additional files or folders, with
options to change this behavior.
The current change it makes is for checkpoints: if you create a checkpoint of a container, it is saved
by default in the container bundle at `{bundle}/checkpoints/{checkpoint name}`.
A user can also populate this directory and provide the checkpoint name on the create request so that the container is started from this checkpoint.
As of this point, containerd has no other additions to the bundle.
Runtime state is currently stored in a tmpfs filesystem like `/run`.

View file

@@ -1,208 +0,0 @@
# Creating OCI bundles
Since containerd consumes the OCI bundle format, containers and their configuration have to be created
on the machine that containerd is running on. The easiest way to do this is to download an image
with docker and export it.
## Setup
The first thing we need to do to create a bundle is set up the initial directory structure.
Create a directory with a unique name. In this example we will create a redis container.
We will create this container in a `/containers` directory.
```bash
mkdir redis
```
Inside the `redis` directory, create another directory named `rootfs`:
```bash
mkdir redis/rootfs
```
## Root Filesystem
Now we need to populate the `rootfs` directory with the filesystem of a redis container. To do this we
need to pull the redis image with docker and export its contents to the `rootfs` directory.
```bash
docker pull redis
# create the container with a temp name so that we can export it
docker create --name tempredis redis
# export it into the rootfs directory
docker export tempredis | tar -C redis/rootfs -xf -
# remove the container now that we have exported
docker rm tempredis
```
Now that we have the root filesystem populated we need to create the configs for the container.
## Configs
An easy way to get temp configs for the container bundle is to use the `runc`
cli tool from the [runc](https://github.com/opencontainers/runc) repository.
You need to `cd` into the `redis` directory and run the `runc spec` command. After doing this you
should have a file `config.json` created. The directory structure should look like this:
```
/containers/redis
├── config.json
└── rootfs/
```
## Edits
We need to edit the config to add `redis-server` as the application to launch inside the container,
and remove the network namespace so that you can connect to the redis server on your system.
The resulting `config.json` should look like this:
```json
{
"ociVersion": "0.4.0",
"platform": {
"os": "linux",
"arch": "amd64"
},
"process": {
"terminal": true,
"user": {},
"args": [
"redis-server", "--bind", "0.0.0.0"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs",
"readonly": true
},
"hostname": "runc",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
}
],
"hooks": {},
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
]
},
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"devices": null
}
}
```
This is all you need to do to make an OCI compliant bundle for containerd to start.

View file

@@ -1,159 +0,0 @@
# Client CLI
There is a default cli named `ctr` based on the GRPC api.
This cli allows you to create and manage containers running under containerd.
```
$ ctr -h
NAME:
ctr - High performance container daemon cli
USAGE:
ctr [global options] command [command options] [arguments...]
VERSION:
0.1.0 commit: 54c213e8a719d734001beb2cb8f130c84cc3bd20
COMMANDS:
checkpoints list all checkpoints
containers interact with running containers
events receive events from the containerd daemon
state get a raw dump of the containerd state
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--debug enable debug output in the logs
--address "/run/containerd/containerd.sock" address of GRPC API
--help, -h show help
--version, -v print the version
```
## Starting a container
```
$ ctr containers start -h
NAME:
ctr containers start - start a container
USAGE:
ctr containers start [command options] [arguments...]
OPTIONS:
--checkpoint, -c checkpoint to start the container from
--attach, -a connect to the stdio of the container
--label, -l [--label option --label option] set labels for the container
```
```bash
$ sudo ctr containers start redis /containers/redis
```
`/containers/redis` is the path to an OCI bundle. [See the bundle docs for more information.](bundle.md)
## Listing containers
```bash
$ sudo ctr containers
ID PATH STATUS PROCESSES
1 /containers/redis running 14063
19 /containers/redis running 14100
14 /containers/redis running 14117
4 /containers/redis running 14030
16 /containers/redis running 14061
3 /containers/redis running 14024
12 /containers/redis running 14097
10 /containers/redis running 14131
18 /containers/redis running 13977
13 /containers/redis running 13979
15 /containers/redis running 13998
5 /containers/redis running 14021
9 /containers/redis running 14075
6 /containers/redis running 14107
2 /containers/redis running 14135
11 /containers/redis running 13978
17 /containers/redis running 13989
8 /containers/redis running 14053
7 /containers/redis running 14022
0 /containers/redis running 14006
```
## Kill a container's process
```
$ ctr containers kill -h
NAME:
ctr containers kill - send a signal to a container or its processes
USAGE:
ctr containers kill [command options] [arguments...]
OPTIONS:
--pid, -p "init" pid of the process to signal within the container
--signal, -s "15" signal to send to the container
```
## Exec another process into a container
```
$ ctr containers exec -h
NAME:
ctr containers exec - exec another process in an existing container
USAGE:
ctr containers exec [command options] [arguments...]
OPTIONS:
--id container id to add the process to
--pid process id for the new process
--attach, -a connect to the stdio of the container
--cwd current working directory for the process
--tty, -t create a terminal for the process
--env, -e [--env option --env option] environment variables for the process
--uid, -u "0" user id of the user for the process
--gid, -g "0" group id of the user for the process
```
## Stats for a container
```
$ ctr containers stats -h
NAME:
ctr containers stats - get stats for running container
USAGE:
ctr containers stats [arguments...]
```
## List checkpoints
```
$ sudo ctr checkpoints redis
NAME TCP UNIX SOCKETS SHELL
test false false false
test2 false false false
```
## Create a new checkpoint
```
$ ctr checkpoints create -h
NAME:
ctr checkpoints create - create a new checkpoint for the container
USAGE:
ctr checkpoints create [command options] [arguments...]
OPTIONS:
--tcp persist open tcp connections
--unix-sockets persist unix sockets
--exit exit the container after the checkpoint completes successfully
--shell checkpoint shell jobs
```
## Get events
```
$ sudo ctr events
TYPE ID PID STATUS
exit redis 24761 0
```

View file

@@ -1,27 +0,0 @@
# Daemon options
```
$ containerd -h
NAME:
containerd - High performance container daemon
USAGE:
containerd [global options] command [command options] [arguments...]
VERSION:
0.1.0 commit: 54c213e8a719d734001beb2cb8f130c84cc3bd20
COMMANDS:
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--debug enable debug output in the logs
--state-dir "/run/containerd" runtime state directory
--metrics-interval "5m0s" interval for flushing metrics to the store
--listen, -l "/run/containerd/containerd.sock" Address on which GRPC API will listen
--runtime, -r "runc" name of the OCI compliant runtime to use when executing containers
--graphite-address Address of graphite server
--help, -h show help
--version, -v print the version
```

View file

@@ -1,31 +0,0 @@
# Telemetry
Currently containerd only outputs metrics to stdout but will support dumping to various backends in the future.
```
[containerd] 2015/12/16 11:48:28 timer container-start-time
[containerd] 2015/12/16 11:48:28 count: 22
[containerd] 2015/12/16 11:48:28 min: 25425883
[containerd] 2015/12/16 11:48:28 max: 113077691
[containerd] 2015/12/16 11:48:28 mean: 68386923.27
[containerd] 2015/12/16 11:48:28 stddev: 20928453.26
[containerd] 2015/12/16 11:48:28 median: 65489003.50
[containerd] 2015/12/16 11:48:28 75%: 82393210.50
[containerd] 2015/12/16 11:48:28 95%: 112267814.75
[containerd] 2015/12/16 11:48:28 99%: 113077691.00
[containerd] 2015/12/16 11:48:28 99.9%: 113077691.00
[containerd] 2015/12/16 11:48:28 1-min rate: 0.00
[containerd] 2015/12/16 11:48:28 5-min rate: 0.01
[containerd] 2015/12/16 11:48:28 15-min rate: 0.01
[containerd] 2015/12/16 11:48:28 mean rate: 0.03
[containerd] 2015/12/16 11:48:28 counter containers
[containerd] 2015/12/16 11:48:28 count: 1
[containerd] 2015/12/16 11:48:28 counter events
[containerd] 2015/12/16 11:48:28 count: 87
[containerd] 2015/12/16 11:48:28 counter events-subscribers
[containerd] 2015/12/16 11:48:28 count: 2
[containerd] 2015/12/16 11:48:28 gauge goroutines
[containerd] 2015/12/16 11:48:28 value: 38
[containerd] 2015/12/16 11:48:28 gauge fds
[containerd] 2015/12/16 11:48:28 value: 18
```

View file

@@ -1,75 +0,0 @@
// benchmark is a single app that runs containers in containerd and outputs
// the total execution time in seconds.
// Usage: go run benchmark.go -count 1000 -bundle /containers/redis
package main
import (
"flag"
"net"
"strconv"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/api/grpc/types"
netcontext "golang.org/x/net/context"
"google.golang.org/grpc"
)
func init() {
flag.StringVar(&bundle, "bundle", "/containers/redis", "the bundle path")
flag.StringVar(&addr, "addr", "/run/containerd/containerd.sock", "address of the containerd instance")
flag.IntVar(&count, "count", 1000, "number of containers to run")
flag.Parse()
}
var (
count int
bundle, addr string
group = sync.WaitGroup{}
jobs = make(chan string, 20)
)
func getClient() types.APIClient {
dialOpts := []grpc.DialOption{grpc.WithInsecure()}
dialOpts = append(dialOpts,
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
},
))
conn, err := grpc.Dial(addr, dialOpts...)
if err != nil {
logrus.Fatal(err)
}
return types.NewAPIClient(conn)
}
func main() {
client := getClient()
for i := 0; i < 100; i++ {
group.Add(1)
go worker(client)
}
start := time.Now()
for i := 0; i < count; i++ {
id := strconv.Itoa(i)
jobs <- id
}
close(jobs)
group.Wait()
end := time.Now()
duration := end.Sub(start).Seconds()
logrus.Info(duration)
}
func worker(client types.APIClient) {
defer group.Done()
for id := range jobs {
if _, err := client.CreateContainer(netcontext.Background(), &types.CreateContainerRequest{
Id: id,
BundlePath: bundle,
}); err != nil {
logrus.Error(err)
}
}
}

View file

@@ -1,111 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"reflect"
utils "github.com/docker/containerd/testutils"
ocs "github.com/opencontainers/runtime-spec/specs-go"
)
type OciProcessArgs struct {
Cmd string
Args []string
}
type Bundle struct {
Source string
Name string
Spec ocs.Spec
Path string
}
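// bundleMap tracks every bundle created during a test run, keyed by bundle name.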
var bundleMap map[string]Bundle
// untarRootfs untars the given `source` tarPath into `destination/rootfs`
func untarRootfs(source string, destination string) error {
destination = filepath.Join(destination, "rootfs")
if err := os.MkdirAll(destination, 0755); err != nil {
return err
}
tar := exec.Command("tar", "-C", destination, "-xf", source)
return tar.Run()
}
// CreateBundleWithFilter generates a new oci-bundle named `name` from
// the provided `source` rootfs. It starts from the default spec
// generated by `runc spec`, overrides the `spec.Process.Args` value
// with `args` and sets `spec.Process.Terminal` to false. It then applies
// `filter()` to the resulting spec if one is provided.
func CreateBundleWithFilter(source, name string, args []string, filter func(spec *ocs.Spec)) error {
// Generate the spec
var spec ocs.Spec
f, err := os.Open(utils.RefOciSpecsPath)
if err != nil {
return fmt.Errorf("Failed to open default spec: %v", err)
}
if err := json.NewDecoder(f).Decode(&spec); err != nil {
return fmt.Errorf("Failed to load default spec: %v", err)
}
f.Close()
spec.Process.Args = args
spec.Process.Terminal = false
if filter != nil {
filter(&spec)
}
bundlePath := filepath.Join(utils.BundlesRoot, name)
nb := Bundle{source, name, spec, bundlePath}
// Check that we don't already have such a bundle
if b, ok := bundleMap[name]; ok {
if !reflect.DeepEqual(b, nb) {
return fmt.Errorf("A bundle named '%s' already exists but with different properties! %#v != %#v",
name, b, nb)
}
return nil
}
// Nothing should be there, but just in case
os.RemoveAll(bundlePath)
if err := untarRootfs(filepath.Join(utils.ArchivesDir, source+".tar"), bundlePath); err != nil {
return fmt.Errorf("Failed to untar %s.tar: %v", source, err)
}
// create a place for the io fifo
if err := os.Mkdir(filepath.Join(bundlePath, "io"), 0755); err != nil {
return fmt.Errorf("Failed to create bundle io directory: %v", err)
}
// Write the updated spec to the right location
config, e := os.Create(filepath.Join(bundlePath, "config.json"))
if e != nil {
return fmt.Errorf("Failed to create oci spec: %v", e)
}
defer config.Close()
if err := json.NewEncoder(config).Encode(&spec); err != nil {
return fmt.Errorf("Failed to encode oci spec: %v", err)
}
bundleMap[name] = nb
return nil
}
func GetBundle(name string) *Bundle {
bundle, ok := bundleMap[name]
if !ok {
return nil
}
return &bundle
}
func CreateBusyboxBundle(name string, args []string) error {
return CreateBundleWithFilter("busybox", name, args, nil)
}

View file

@@ -1,278 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/docker/containerd/api/grpc/types"
utils "github.com/docker/containerd/testutils"
"github.com/go-check/check"
"github.com/golang/protobuf/ptypes/timestamp"
)
func Test(t *testing.T) {
check.TestingT(t)
}
func init() {
check.Suite(&ContainerdSuite{})
}
type ContainerdSuite struct {
cwd string
outputDir string
stateDir string
grpcSocket string
logFile *os.File
cd *exec.Cmd
syncChild chan error
grpcClient types.APIClient
eventFiltersMutex sync.Mutex
eventFilters map[string]func(event *types.Event)
lastEventTs *timestamp.Timestamp
}
// getClient returns a connection to the Suite containerd
func (cs *ContainerdSuite) getClient(socket string) error {
// Parse proto://address form addresses.
bindParts := strings.SplitN(socket, "://", 2)
if len(bindParts) != 2 {
return fmt.Errorf("bad bind address format %s, expected proto://address", socket)
}
// reset the logger for grpc to log to /dev/null so that it does not mess with our stdio
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
dialOpts := []grpc.DialOption{grpc.WithInsecure()}
dialOpts = append(dialOpts,
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(bindParts[0], bindParts[1], timeout)
}),
grpc.WithBlock(),
grpc.WithTimeout(5*time.Second),
)
conn, err := grpc.Dial(socket, dialOpts...)
if err != nil {
return err
}
healthClient := grpc_health_v1.NewHealthClient(conn)
if _, err := healthClient.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}); err != nil {
return err
}
cs.grpcClient = types.NewAPIClient(conn)
return nil
}
// ContainerdEventsHandler will process all events coming from
// containerd. If a filter has been registered for a given container id
// via `SetContainerEventFilter()`, it will be invoked every time an
// event for that id is received
func (cs *ContainerdSuite) ContainerdEventsHandler(events types.API_EventsClient) {
for {
e, err := events.Recv()
if err != nil {
// If daemon died or exited, return
if strings.Contains(err.Error(), "transport is closing") {
break
}
time.Sleep(1 * time.Second)
events, _ = cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: cs.lastEventTs})
continue
}
cs.lastEventTs = e.Timestamp
cs.eventFiltersMutex.Lock()
if f, ok := cs.eventFilters[e.Id]; ok {
f(e)
if e.Type == "exit" && e.Pid == "init" {
delete(cs.eventFilters, e.Id)
}
}
cs.eventFiltersMutex.Unlock()
}
}
func (cs *ContainerdSuite) StopDaemon(kill bool) {
if cs.cd == nil {
return
}
if kill {
cs.cd.Process.Kill()
<-cs.syncChild
cs.cd = nil
} else {
// Terminate gently if possible
cs.cd.Process.Signal(os.Interrupt)
done := false
for !done {
select {
case err := <-cs.syncChild:
if err != nil {
fmt.Printf("master containerd did not exit cleanly: %v\n", err)
}
done = true
case <-time.After(3 * time.Second):
fmt.Println("Timeout while waiting for containerd to exit, killing it!")
cs.cd.Process.Kill()
}
}
}
}
func (cs *ContainerdSuite) RestartDaemon(kill bool) error {
cs.StopDaemon(kill)
cd := exec.Command("containerd", "--debug",
"--state-dir", cs.stateDir,
"--listen", cs.grpcSocket,
"--metrics-interval", "0m0s",
"--runtime-args", fmt.Sprintf("--root=%s", filepath.Join(cs.cwd, cs.outputDir, "runc")),
)
cd.Stderr = cs.logFile
cd.Stdout = cs.logFile
if err := cd.Start(); err != nil {
return err
}
cs.cd = cd
if err := cs.getClient(cs.grpcSocket); err != nil {
// Kill the daemon
cs.cd.Process.Kill()
return err
}
// Monitor events
events, err := cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: cs.lastEventTs})
if err != nil {
return err
}
go cs.ContainerdEventsHandler(events)
go func() {
cs.syncChild <- cd.Wait()
}()
return nil
}
func (cs *ContainerdSuite) SetUpSuite(c *check.C) {
bundleMap = make(map[string]Bundle)
cs.eventFilters = make(map[string]func(event *types.Event))
// Get working directory for tests
wd := utils.GetTestOutDir()
if err := os.Chdir(wd); err != nil {
c.Fatalf("Could not change working directory: %v", err)
}
cs.cwd = wd
// Clean old bundles
os.RemoveAll(utils.BundlesRoot)
// Ensure the oci bundles directory exists
if err := os.MkdirAll(utils.BundlesRoot, 0755); err != nil {
c.Fatalf("Failed to create bundles directory: %v", err)
}
// Generate the reference spec
if err := utils.GenerateReferenceSpecs(utils.BundlesRoot); err != nil {
c.Fatalf("Unable to generate OCI reference spec: %v", err)
}
// Create our output directory
cs.outputDir = fmt.Sprintf(utils.OutputDirFormat, time.Now().Format("2006-01-02_150405.000000"))
cs.stateDir = filepath.Join(cs.outputDir, "containerd-master")
if err := os.MkdirAll(cs.stateDir, 0755); err != nil {
c.Fatalf("Unable to created output directory '%s': %v", cs.stateDir, err)
}
cs.grpcSocket = "unix://" + filepath.Join(cs.outputDir, "containerd-master", "containerd.sock")
cdLogFile := filepath.Join(cs.outputDir, "containerd-master", "containerd.log")
f, err := os.OpenFile(cdLogFile, os.O_CREATE|os.O_TRUNC|os.O_RDWR|os.O_SYNC, 0777)
if err != nil {
c.Fatalf("Failed to create master containerd log file: %v", err)
}
cs.logFile = f
cs.syncChild = make(chan error)
cs.RestartDaemon(false)
}
func (cs *ContainerdSuite) TearDownSuite(c *check.C) {
// tell containerd to stop
if cs.cd != nil {
cs.cd.Process.Signal(os.Interrupt)
done := false
for !done {
select {
case err := <-cs.syncChild:
if err != nil {
c.Errorf("master containerd did not exit cleanly: %v", err)
}
done = true
case <-time.After(3 * time.Second):
fmt.Println("Timeout while waiting for containerd to exit, killing it!")
cs.cd.Process.Kill()
}
}
}
if cs.logFile != nil {
cs.logFile.Close()
}
}
func (cs *ContainerdSuite) SetContainerEventFilter(id string, filter func(event *types.Event)) {
cs.eventFiltersMutex.Lock()
cs.eventFilters[id] = filter
cs.eventFiltersMutex.Unlock()
}
func (cs *ContainerdSuite) TearDownTest(c *check.C) {
ctrs, err := cs.ListRunningContainers()
if err != nil {
c.Fatalf("Unable to retrieve running containers: %v", err)
}
// Kill all containers that survived
for _, ctr := range ctrs {
ch := make(chan interface{})
cs.SetContainerEventFilter(ctr.Id, func(e *types.Event) {
if e.Type == "exit" && e.Pid == "init" {
ch <- nil
}
})
if err := cs.KillContainer(ctr.Id); err != nil {
fmt.Fprintf(os.Stderr, "Failed to cleanup leftover test containers: %v\n", err)
}
select {
case <-ch:
case <-time.After(3 * time.Second):
fmt.Fprintf(os.Stderr, "TearDownTest: Containerd %v didn't die after 3 seconds\n", ctr.Id)
}
}
}

View file

@@ -1,321 +0,0 @@
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"syscall"
"time"
"github.com/docker/containerd/api/grpc/types"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context"
)
func (cs *ContainerdSuite) GetLogs() string {
b, _ := ioutil.ReadFile(cs.logFile.Name())
return string(b)
}
func (cs *ContainerdSuite) Events(from time.Time, storedOnly bool, id string) (types.API_EventsClient, error) {
var (
ftsp *timestamp.Timestamp
err error
)
if !from.IsZero() {
ftsp, err = ptypes.TimestampProto(from)
if err != nil {
return nil, err
}
}
return cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: ftsp, StoredOnly: storedOnly, Id: id})
}
func (cs *ContainerdSuite) ListRunningContainers() ([]*types.Container, error) {
resp, err := cs.grpcClient.State(context.Background(), &types.StateRequest{})
if err != nil {
return nil, err
}
return resp.Containers, nil
}
func (cs *ContainerdSuite) SignalContainerProcess(id string, procID string, sig uint32) error {
_, err := cs.grpcClient.Signal(context.Background(), &types.SignalRequest{
Id: id,
Pid: procID,
Signal: sig,
})
return err
}
func (cs *ContainerdSuite) SignalContainer(id string, sig uint32) error {
return cs.SignalContainerProcess(id, "init", sig)
}
func (cs *ContainerdSuite) KillContainer(id string) error {
return cs.SignalContainerProcess(id, "init", uint32(syscall.SIGKILL))
}
func (cs *ContainerdSuite) UpdateContainerResource(id string, rs *types.UpdateResource) error {
_, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{
Id: id,
Pid: "init",
Status: "",
Resources: rs,
})
return err
}
func (cs *ContainerdSuite) PauseContainer(id string) error {
_, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{
Id: id,
Pid: "init",
Status: "paused",
})
return err
}
func (cs *ContainerdSuite) ResumeContainer(id string) error {
_, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{
Id: id,
Pid: "init",
Status: "running",
})
return err
}
func (cs *ContainerdSuite) GetContainerStats(id string) (*types.StatsResponse, error) {
stats, err := cs.grpcClient.Stats(context.Background(), &types.StatsRequest{
Id: id,
})
return stats, err
}
type stdio struct {
stdin string
stdout string
stderr string
stdinf *os.File
stdoutf *os.File
stderrf *os.File
stdoutBuffer bytes.Buffer
stderrBuffer bytes.Buffer
}
type ContainerProcess struct {
containerID string
pid string
bundle *Bundle
io stdio
eventsCh chan *types.Event
cs *ContainerdSuite
hasExited bool
}
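// openIo opens the three stdio fifos and copies stdout and stderr into
// in-memory buffers for later assertions.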
func (c *ContainerProcess) openIo() (err error) {
defer func() {
if err != nil {
c.Cleanup()
}
}()
c.io.stdinf, err = os.OpenFile(c.io.stdin, os.O_RDWR, 0)
if err != nil {
return err
}
c.io.stdoutf, err = os.OpenFile(c.io.stdout, os.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(&c.io.stdoutBuffer, c.io.stdoutf)
c.io.stderrf, err = os.OpenFile(c.io.stderr, os.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(&c.io.stderrBuffer, c.io.stderrf)
return nil
}
func (c *ContainerProcess) GetEventsChannel() chan *types.Event {
return c.eventsCh
}
func (c *ContainerProcess) GetNextEvent() *types.Event {
if c.hasExited {
return nil
}
e := <-c.eventsCh
if e.Type == "exit" && e.Pid == c.pid {
c.Cleanup()
c.hasExited = true
close(c.eventsCh)
}
return e
}
func (c *ContainerProcess) CloseStdin() error {
_, err := c.cs.grpcClient.UpdateProcess(context.Background(), &types.UpdateProcessRequest{
Id: c.containerID,
Pid: c.pid,
CloseStdin: true,
})
return err
}
func (c *ContainerProcess) Cleanup() {
for _, f := range []*os.File{
c.io.stdinf,
c.io.stdoutf,
c.io.stderrf,
} {
if f != nil {
f.Close()
f = nil
}
}
}
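// NewContainerProcess creates the stdio fifos for the given container process
// under the bundle's io directory and opens them.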
func NewContainerProcess(cs *ContainerdSuite, bundle *Bundle, cid, pid string) (c *ContainerProcess, err error) {
c = &ContainerProcess{
containerID: cid,
pid: "init",
bundle: bundle,
eventsCh: make(chan *types.Event, 8),
cs: cs,
hasExited: false,
}
for name, path := range map[string]*string{
"stdin": &c.io.stdin,
"stdout": &c.io.stdout,
"stderr": &c.io.stderr,
} {
*path = filepath.Join(bundle.Path, "io", cid+"-"+pid+"-"+name)
if err = syscall.Mkfifo(*path, 0755); err != nil && !os.IsExist(err) {
return nil, err
}
}
if err = c.openIo(); err != nil {
return nil, err
}
return c, nil
}
func (cs *ContainerdSuite) StartContainerWithEventFilter(id, bundleName string, filter func(*types.Event)) (c *ContainerProcess, err error) {
bundle := GetBundle(bundleName)
if bundle == nil {
return nil, fmt.Errorf("No such bundle '%s'", bundleName)
}
c, err = NewContainerProcess(cs, bundle, id, "init")
if err != nil {
return nil, err
}
r := &types.CreateContainerRequest{
Id: id,
BundlePath: filepath.Join(cs.cwd, bundle.Path),
Stdin: filepath.Join(cs.cwd, c.io.stdin),
Stdout: filepath.Join(cs.cwd, c.io.stdout),
Stderr: filepath.Join(cs.cwd, c.io.stderr),
}
if filter == nil {
filter = func(event *types.Event) {
c.eventsCh <- event
}
}
cs.SetContainerEventFilter(id, filter)
if _, err := cs.grpcClient.CreateContainer(context.Background(), r); err != nil {
c.Cleanup()
return nil, err
}
return c, nil
}
func (cs *ContainerdSuite) StartContainer(id, bundleName string) (c *ContainerProcess, err error) {
return cs.StartContainerWithEventFilter(id, bundleName, nil)
}
func (cs *ContainerdSuite) RunContainer(id, bundleName string) (c *ContainerProcess, err error) {
c, err = cs.StartContainer(id, bundleName)
if err != nil {
return nil, err
}
for {
e := c.GetNextEvent()
if e.Type == "exit" && e.Pid == "init" {
break
}
}
return c, err
}
func (cs *ContainerdSuite) AddProcessToContainer(init *ContainerProcess, pid, cwd string, env, args []string, uid, gid uint32) (c *ContainerProcess, err error) {
c, err = NewContainerProcess(cs, init.bundle, init.containerID, pid)
if err != nil {
return nil, err
}
pr := &types.AddProcessRequest{
Id: init.containerID,
Pid: pid,
Args: args,
Cwd: cwd,
Env: env,
User: &types.User{
Uid: uid,
Gid: gid,
},
Stdin: filepath.Join(cs.cwd, c.io.stdin),
Stdout: filepath.Join(cs.cwd, c.io.stdout),
Stderr: filepath.Join(cs.cwd, c.io.stderr),
}
_, err = cs.grpcClient.AddProcess(context.Background(), pr)
if err != nil {
c.Cleanup()
return nil, err
}
return c, nil
}
type containerSorter struct {
c []*types.Container
}
func (s *containerSorter) Len() int {
return len(s.c)
}
func (s *containerSorter) Swap(i, j int) {
s.c[i], s.c[j] = s.c[j], s.c[i]
}
func (s *containerSorter) Less(i, j int) bool {
return s.c[i].Id < s.c[j].Id
}
func sortContainers(c []*types.Container) {
sort.Sort(&containerSorter{c})
}

View file

@@ -1,63 +0,0 @@
package main
import (
"fmt"
"time"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
func (cs *ContainerdSuite) TestEventsId(t *check.C) {
if err := CreateBusyboxBundle("busybox-ls", []string{"ls"}); err != nil {
t.Fatal(err)
}
from := time.Now()
for i := 0; i < 10; i++ {
_, err := cs.RunContainer(fmt.Sprintf("ls-%d", i), "busybox-ls")
if err != nil {
t.Fatal(err)
}
}
containerID := "ls-4"
events, err := cs.Events(from, true, containerID)
if err != nil {
t.Fatal(err)
}
evs := []*types.Event{}
for {
e, err := events.Recv()
if err != nil {
if err.Error() == "EOF" {
break
}
t.Fatal(err)
}
evs = append(evs, e)
}
t.Assert(len(evs), checker.Equals, 2)
for idx, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 0,
Pid: "init",
},
} {
evt.Timestamp = evs[idx].Timestamp
t.Assert(*evs[idx], checker.Equals, evt)
}
}

View file

@@ -1,187 +0,0 @@
package main
import (
"path/filepath"
"syscall"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
var (
err error
initp *ContainerProcess
echop *ContainerProcess
)
containerID := "top"
initp, err = cs.StartContainer(containerID, bundleName)
t.Assert(err, checker.Equals, nil)
echop, err = cs.AddProcessToContainer(initp, "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Ay Caramba! ; exit 1"}, 0, 0)
t.Assert(err, checker.Equals, nil)
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "start-process",
Id: containerID,
Status: 0,
Pid: "echo",
},
{
Type: "exit",
Id: containerID,
Status: 1,
Pid: "echo",
},
} {
ch := initp.GetEventsChannel()
e := <-ch
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
}
t.Assert(echop.io.stdoutBuffer.String(), checker.Equals, "Ay Caramba!")
}
func (cs *ContainerdSuite) TestBusyboxTopExecTop(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
var (
err error
initp *ContainerProcess
)
containerID := "top"
initp, err = cs.StartContainer(containerID, bundleName)
t.Assert(err, checker.Equals, nil)
execID := "top1"
_, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
t.Assert(err, checker.Equals, nil)
for idx, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "start-process",
Id: containerID,
Status: 0,
Pid: execID,
},
{
Type: "exit",
Id: containerID,
Status: 137,
Pid: execID,
},
} {
ch := initp.GetEventsChannel()
e := <-ch
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
if idx == 1 {
// Process Started, kill it
cs.SignalContainerProcess(containerID, "top1", uint32(syscall.SIGKILL))
}
}
// Container should still be running
containers, err := cs.ListRunningContainers()
if err != nil {
t.Fatal(err)
}
t.Assert(len(containers), checker.Equals, 1)
t.Assert(containers[0].Id, checker.Equals, "top")
t.Assert(containers[0].Status, checker.Equals, "running")
t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path))
}
func (cs *ContainerdSuite) TestBusyboxTopExecTopKillInit(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
var (
err error
initp *ContainerProcess
)
containerID := "top"
initp, err = cs.StartContainer(containerID, bundleName)
t.Assert(err, checker.Equals, nil)
execID := "top1"
_, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
t.Assert(err, checker.Equals, nil)
ch := initp.GetEventsChannel()
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "start-process",
Id: containerID,
Status: 0,
Pid: execID,
},
} {
e := <-ch
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
}
cs.SignalContainerProcess(containerID, "init", uint32(syscall.SIGTERM))
for i := 0; i < 2; i++ {
e := <-ch
switch e.Pid {
case "init":
evt := types.Event{
Type: "exit",
Id: containerID,
Status: 143,
Pid: "init",
Timestamp: e.Timestamp,
}
t.Assert(*e, checker.Equals, evt)
case execID:
evt := types.Event{
Type: "exit",
Id: containerID,
Status: 137,
Pid: execID,
Timestamp: e.Timestamp,
}
t.Assert(*e, checker.Equals, evt)
default:
t.Fatalf("Unexpected event %v", e)
}
}
}

View file

@ -1,538 +0,0 @@
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
ocs "github.com/opencontainers/runtime-spec/specs-go"
"google.golang.org/grpc"
)
func (cs *ContainerdSuite) TestStartBusyboxLsSlash(t *check.C) {
expectedOutput := `bin
dev
etc
home
lib
lib64
linuxrc
media
mnt
opt
proc
root
run
sbin
sys
tmp
usr
var
`
if err := CreateBusyboxBundle("busybox-ls-slash", []string{"ls", "/"}); err != nil {
t.Fatal(err)
}
c, err := cs.RunContainer("myls", "busybox-ls-slash")
if err != nil {
t.Fatal(err)
}
t.Assert(c.io.stdoutBuffer.String(), checker.Equals, expectedOutput)
}
func (cs *ContainerdSuite) TestStartBusyboxNoSuchFile(t *check.C) {
expectedOutput := `oci runtime error: exec: "NoSuchFile": executable file not found in $PATH`
if err := CreateBusyboxBundle("busybox-no-such-file", []string{"NoSuchFile"}); err != nil {
t.Fatal(err)
}
_, err := cs.RunContainer("NoSuchFile", "busybox-no-such-file")
t.Assert(grpc.ErrorDesc(err), checker.Contains, expectedOutput)
}
func (cs *ContainerdSuite) TestStartBusyboxTop(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
containerID := "start-busybox-top"
_, err := cs.StartContainer(containerID, bundleName)
t.Assert(err, checker.Equals, nil)
containers, err := cs.ListRunningContainers()
if err != nil {
t.Fatal(err)
}
t.Assert(len(containers), checker.Equals, 1)
t.Assert(containers[0].Id, checker.Equals, containerID)
t.Assert(containers[0].Status, checker.Equals, "running")
t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path))
}
func (cs *ContainerdSuite) TestStartBusyboxLsEvents(t *check.C) {
if err := CreateBusyboxBundle("busybox-ls", []string{"ls"}); err != nil {
t.Fatal(err)
}
containerID := "ls-events"
c, err := cs.StartContainer(containerID, "busybox-ls")
if err != nil {
t.Fatal(err)
}
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 0,
Pid: "init",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(2 * time.Second):
t.Fatal("Container took more than 2 seconds to terminate")
}
}
}
func (cs *ContainerdSuite) TestStartBusyboxSleep(t *check.C) {
if err := CreateBusyboxBundle("busybox-sleep-5", []string{"sleep", "5"}); err != nil {
t.Fatal(err)
}
ch := make(chan interface{})
filter := func(e *types.Event) {
if e.Type == "exit" && e.Pid == "init" {
ch <- nil
}
}
start := time.Now()
_, err := cs.StartContainerWithEventFilter("sleep5", "busybox-sleep-5", filter)
if err != nil {
t.Fatal(err)
}
	// We allow a generous 20% margin of error
select {
case <-ch:
t.Assert(uint64(time.Now().Sub(start)), checker.LessOrEqualThan, uint64(6*time.Second))
case <-time.After(6 * time.Second):
t.Fatal("Container took more than 6 seconds to exit")
}
}
func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
containerID := "top-kill"
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
<-time.After(1 * time.Second)
err = cs.KillContainer(containerID)
if err != nil {
t.Fatal(err)
}
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 128 + uint32(syscall.SIGKILL),
Pid: "init",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(2 * time.Second):
t.Fatal("Container took more than 2 seconds to terminate")
}
}
}
func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
containerID := "top-sigterm"
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
<-time.After(1 * time.Second)
err = cs.SignalContainer(containerID, uint32(syscall.SIGTERM))
if err != nil {
t.Fatal(err)
}
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 128 + uint32(syscall.SIGTERM),
Pid: "init",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(2 * time.Second):
t.Fatal("Container took more than 2 seconds to terminate")
}
}
}
func (cs *ContainerdSuite) TestStartBusyboxTrapUSR1(t *check.C) {
if err := CreateBusyboxBundle("busybox-trap-usr1", []string{"sh", "-c", "trap 'echo -n booh!' SIGUSR1 ; sleep 60 & wait"}); err != nil {
t.Fatal(err)
}
containerID := "trap-usr1"
c, err := cs.StartContainer(containerID, "busybox-trap-usr1")
if err != nil {
t.Fatal(err)
}
<-time.After(1 * time.Second)
if err := cs.SignalContainer(containerID, uint32(syscall.SIGUSR1)); err != nil {
t.Fatal(err)
}
for {
e := c.GetNextEvent()
if e.Type == "exit" && e.Pid == "init" {
break
}
}
t.Assert(c.io.stdoutBuffer.String(), checker.Equals, "booh!")
}
func (cs *ContainerdSuite) TestStartBusyboxTopPauseResume(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
containerID := "top-pause-resume"
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
if err := cs.PauseContainer(containerID); err != nil {
t.Fatal(err)
}
if err := cs.ResumeContainer(containerID); err != nil {
t.Fatal(err)
}
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "pause",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "resume",
Id: containerID,
Status: 0,
Pid: "",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(2 * time.Second):
t.Fatal("Container took more than 2 seconds to terminate")
}
}
// check that status is running
containers, err := cs.ListRunningContainers()
if err != nil {
t.Fatal(err)
}
t.Assert(len(containers), checker.Equals, 1)
t.Assert(containers[0].Id, checker.Equals, containerID)
t.Assert(containers[0].Status, checker.Equals, "running")
}
func (cs *ContainerdSuite) TestOOM(t *check.C) {
bundleName := "busybox-sh-512k-memlimit"
if err := CreateBundleWithFilter("busybox", bundleName, []string{"sh", "-c", "x=oom-party-time; while true; do x=$x$x$x$x$x$x$x$x$x$x; done"}, func(spec *ocs.Spec) {
		// Use a small memory limit (8MB) to trigger the OOM quickly
var limit uint64 = 8 * 1024 * 1024
spec.Linux.Resources.Memory = &ocs.Memory{
Limit: &limit,
}
if swapEnabled() {
spec.Linux.Resources.Memory.Swap = &limit
}
}); err != nil {
t.Fatal(err)
}
containerID := "sh-oom"
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "oom",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 137,
Pid: "init",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(60 * time.Second):
t.Fatalf("Container took more than 60 seconds to %s", evt.Type)
}
}
}
func (cs *ContainerdSuite) TestRestart(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
totalCtr := 10
for i := 0; i < totalCtr; i++ {
containerID := fmt.Sprintf("top%d", i)
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
e := c.GetNextEvent()
t.Assert(*e, checker.Equals, types.Event{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
Timestamp: e.Timestamp,
})
}
// restart daemon gracefully (SIGINT)
cs.RestartDaemon(false)
// check that status is running
containers, err := cs.ListRunningContainers()
if err != nil {
t.Fatal(err)
}
sortContainers(containers)
t.Assert(len(containers), checker.Equals, totalCtr)
for i := 0; i < totalCtr; i++ {
t.Assert(containers[i].Id, checker.Equals, fmt.Sprintf("top%d", i))
t.Assert(containers[i].Status, checker.Equals, "running")
}
// Now kill daemon (SIGKILL)
cs.StopDaemon(true)
	// Sleep a few seconds to allow the event timestamps to change, since
	// they have one-second granularity
<-time.After(3 * time.Second)
// Kill a couple of containers
killedCtr := map[int]bool{4: true, 2: true}
var f func(*types.Event)
deathChans := make([]chan error, len(killedCtr))
deathChansIdx := 0
for i := range killedCtr {
ch := make(chan error, 1)
deathChans[deathChansIdx] = ch
deathChansIdx++
syscall.Kill(int(containers[i].Pids[0]), syscall.SIGKILL)
// Filter to be notified of their death
containerID := fmt.Sprintf("top%d", i)
f = func(event *types.Event) {
expectedEvent := types.Event{
Type: "exit",
Id: containerID,
Status: 137,
Pid: "init",
}
expectedEvent.Timestamp = event.Timestamp
if ok := t.Check(*event, checker.Equals, expectedEvent); !ok {
ch <- fmt.Errorf("Unexpected event: %#v", *event)
} else {
ch <- nil
}
}
cs.SetContainerEventFilter(containerID, f)
}
cs.RestartDaemon(true)
// Ensure we got our events
for i := range deathChans {
done := false
		for !done {
select {
case err := <-deathChans[i]:
t.Assert(err, checker.Equals, nil)
done = true
case <-time.After(3 * time.Second):
t.Fatal("Exit event for container not received after 3 seconds")
}
}
}
// check that status is running
containers, err = cs.ListRunningContainers()
if err != nil {
t.Fatal(err)
}
sortContainers(containers)
t.Assert(len(containers), checker.Equals, totalCtr-len(killedCtr))
idShift := 0
for i := 0; i < totalCtr-len(killedCtr); i++ {
if _, ok := killedCtr[i+idShift]; ok {
idShift++
}
t.Assert(containers[i].Id, checker.Equals, fmt.Sprintf("top%d", i+idShift))
t.Assert(containers[i].Status, checker.Equals, "running")
}
}
func swapEnabled() bool {
_, err := os.Stat("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
return err == nil
}
func (cs *ContainerdSuite) TestSigkillShimReuseName(t *check.C) {
bundleName := "busybox-top"
if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil {
t.Fatal(err)
}
containerID := "top"
c, err := cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
// Sigkill the shim
exec.Command("pkill", "-9", "containerd-shim").Run()
// Wait for it to be reaped
for _, evt := range []types.Event{
{
Type: "start-container",
Id: containerID,
Status: 0,
Pid: "",
},
{
Type: "exit",
Id: containerID,
Status: 128 + 9,
Pid: "init",
},
} {
ch := c.GetEventsChannel()
select {
case e := <-ch:
evt.Timestamp = e.Timestamp
t.Assert(*e, checker.Equals, evt)
case <-time.After(2 * time.Second):
t.Fatal("Container took more than 2 seconds to terminate")
}
}
	// Start a new container with the same name
c, err = cs.StartContainer(containerID, bundleName)
if err != nil {
t.Fatal(err)
}
}

View file

@ -1,738 +0,0 @@
package runtime
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/specs"
ocs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
)
// Container defines the operations allowed on a container
type Container interface {
// ID returns the container ID
ID() string
// Path returns the path to the bundle
Path() string
// Start starts the init process of the container
Start(checkpointPath string, s Stdio) (Process, error)
// Exec starts another process in an existing container
Exec(string, specs.ProcessSpec, Stdio) (Process, error)
// Delete removes the container's state and any resources
Delete() error
// Processes returns all the containers processes that have been added
Processes() ([]Process, error)
// State returns the containers runtime state
State() State
// Resume resumes a paused container
Resume() error
// Pause pauses a running container
Pause() error
// RemoveProcess removes the specified process from the container
RemoveProcess(string) error
// Checkpoints returns all the checkpoints for a container
Checkpoints(checkpointDir string) ([]Checkpoint, error)
// Checkpoint creates a new checkpoint
Checkpoint(checkpoint Checkpoint, checkpointDir string) error
// DeleteCheckpoint deletes the checkpoint for the provided name
DeleteCheckpoint(name string, checkpointDir string) error
// Labels are user provided labels for the container
Labels() []string
// Pids returns all pids inside the container
Pids() ([]int, error)
// Stats returns realtime container stats and resource information
Stats() (*Stat, error)
// Name or path of the OCI compliant runtime used to execute the container
Runtime() string
// OOM signals the channel if the container received an OOM notification
OOM() (OOM, error)
// UpdateResource updates the containers resources to new values
UpdateResources(*Resource) error
// Status return the current status of the container.
Status() (State, error)
}
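// Illustrative sketch (not part of the original file): driving the
// Container interface above end to end. The root, bundle path and IDs are
// assumed example values.
func exampleLifecycle() error {
	c, err := New(ContainerOpts{
		Root:    "/run/containerd",     // assumed state dir
		ID:      "example",
		Bundle:  "/containers/example", // assumed OCI bundle
		Runtime: "runc",
		Shim:    "containerd-shim",
		Timeout: 15 * time.Second,
	})
	if err != nil {
		return err
	}
	defer c.Delete()
	// Start blocks until the shim has written its pid file (see
	// waitForCreate below).
	p, err := c.Start("", NewStdio("", "", ""))
	if err != nil {
		return err
	}
	return p.Signal(syscall.SIGTERM)
}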
// OOM wraps a container OOM.
type OOM interface {
io.Closer
FD() int
ContainerID() string
Flush() error
Removed() bool
}
// Stdio holds the path to the 3 pipes used for the standard ios.
type Stdio struct {
Stdin string
Stdout string
Stderr string
}
// NewStdio wraps the given standard io path into an Stdio struct.
// If a given parameter is the empty string, it is replaced by "/dev/null"
func NewStdio(stdin, stdout, stderr string) Stdio {
for _, s := range []*string{
&stdin, &stdout, &stderr,
} {
if *s == "" {
*s = "/dev/null"
}
}
return Stdio{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
}
}
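// Illustrative sketch (not part of the original file): NewStdio replaces
// empty stdio paths with /dev/null.
func exampleNewStdio() {
	s := NewStdio("", "/run/c/out.fifo", "")
	fmt.Println(s.Stdin)  // /dev/null
	fmt.Println(s.Stdout) // /run/c/out.fifo
	fmt.Println(s.Stderr) // /dev/null
}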
// ContainerOpts keeps the options passed at container creation
type ContainerOpts struct {
Root string
ID string
Bundle string
Runtime string
RuntimeArgs []string
Shim string
Labels []string
NoPivotRoot bool
Timeout time.Duration
}
// New returns a new container
func New(opts ContainerOpts) (Container, error) {
c := &container{
root: opts.Root,
id: opts.ID,
bundle: opts.Bundle,
labels: opts.Labels,
processes: make(map[string]*process),
runtime: opts.Runtime,
runtimeArgs: opts.RuntimeArgs,
shim: opts.Shim,
noPivotRoot: opts.NoPivotRoot,
timeout: opts.Timeout,
}
if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil {
return nil, err
}
f, err := os.Create(filepath.Join(c.root, c.id, StateFile))
if err != nil {
return nil, err
}
defer f.Close()
if err := json.NewEncoder(f).Encode(state{
Bundle: c.bundle,
Labels: c.labels,
Runtime: c.runtime,
RuntimeArgs: c.runtimeArgs,
Shim: c.shim,
NoPivotRoot: opts.NoPivotRoot,
}); err != nil {
return nil, err
}
return c, nil
}
// Load returns a new container from the matching state file on disk.
func Load(root, id, shimName string, timeout time.Duration) (Container, error) {
var s state
f, err := os.Open(filepath.Join(root, id, StateFile))
if err != nil {
return nil, err
}
defer f.Close()
if err := json.NewDecoder(f).Decode(&s); err != nil {
return nil, err
}
c := &container{
root: root,
id: id,
bundle: s.Bundle,
labels: s.Labels,
runtime: s.Runtime,
runtimeArgs: s.RuntimeArgs,
shim: s.Shim,
noPivotRoot: s.NoPivotRoot,
processes: make(map[string]*process),
timeout: timeout,
}
if c.shim == "" {
c.shim = shimName
}
dirs, err := ioutil.ReadDir(filepath.Join(root, id))
if err != nil {
return nil, err
}
for _, d := range dirs {
if !d.IsDir() {
continue
}
pid := d.Name()
s, err := readProcessState(filepath.Join(root, id, pid))
if err != nil {
return nil, err
}
p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s)
if err != nil {
			logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err)
continue
}
c.processes[pid] = p
}
return c, nil
}
func readProcessState(dir string) (*ProcessState, error) {
f, err := os.Open(filepath.Join(dir, "process.json"))
if err != nil {
return nil, err
}
defer f.Close()
var s ProcessState
if err := json.NewDecoder(f).Decode(&s); err != nil {
return nil, err
}
return &s, nil
}
type container struct {
// path to store runtime state information
root string
id string
bundle string
runtime string
runtimeArgs []string
shim string
processes map[string]*process
labels []string
oomFds []int
noPivotRoot bool
timeout time.Duration
}
func (c *container) ID() string {
return c.id
}
func (c *container) Path() string {
return c.bundle
}
func (c *container) Labels() []string {
return c.labels
}
func (c *container) readSpec() (*specs.Spec, error) {
var spec specs.Spec
f, err := os.Open(filepath.Join(c.bundle, "config.json"))
if err != nil {
return nil, err
}
defer f.Close()
if err := json.NewDecoder(f).Decode(&spec); err != nil {
return nil, err
}
return &spec, nil
}
func (c *container) Delete() error {
err := os.RemoveAll(filepath.Join(c.root, c.id))
args := c.runtimeArgs
args = append(args, "delete", c.id)
	if b, derr := exec.Command(c.runtime, args...).CombinedOutput(); derr != nil {
err = fmt.Errorf("%s: %q", derr, string(b))
} else if len(b) > 0 {
logrus.Debugf("%v %v: %q", c.runtime, args, string(b))
}
return err
}
func (c *container) Processes() ([]Process, error) {
out := []Process{}
for _, p := range c.processes {
out = append(out, p)
}
return out, nil
}
func (c *container) RemoveProcess(pid string) error {
delete(c.processes, pid)
return os.RemoveAll(filepath.Join(c.root, c.id, pid))
}
func (c *container) State() State {
proc := c.processes["init"]
if proc == nil {
return Stopped
}
return proc.State()
}
func (c *container) Runtime() string {
return c.runtime
}
func (c *container) Pause() error {
args := c.runtimeArgs
args = append(args, "pause", c.id)
b, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("%s: %q", err.Error(), string(b))
}
return nil
}
func (c *container) Resume() error {
args := c.runtimeArgs
args = append(args, "resume", c.id)
b, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("%s: %q", err.Error(), string(b))
}
return nil
}
func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) {
if checkpointDir == "" {
checkpointDir = filepath.Join(c.bundle, "checkpoints")
}
dirs, err := ioutil.ReadDir(checkpointDir)
if err != nil {
return nil, err
}
var out []Checkpoint
for _, d := range dirs {
if !d.IsDir() {
continue
}
path := filepath.Join(checkpointDir, d.Name(), "config.json")
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var cpt Checkpoint
if err := json.Unmarshal(data, &cpt); err != nil {
return nil, err
}
out = append(out, cpt)
}
return out, nil
}
func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error {
if checkpointDir == "" {
checkpointDir = filepath.Join(c.bundle, "checkpoints")
}
if err := os.MkdirAll(checkpointDir, 0755); err != nil {
return err
}
path := filepath.Join(checkpointDir, cpt.Name)
if err := os.Mkdir(path, 0755); err != nil {
return err
}
f, err := os.Create(filepath.Join(path, "config.json"))
if err != nil {
return err
}
cpt.Created = time.Now()
err = json.NewEncoder(f).Encode(cpt)
f.Close()
if err != nil {
return err
}
args := []string{
"checkpoint",
"--image-path", path,
"--work-path", filepath.Join(path, "criu.work"),
}
add := func(flags ...string) {
args = append(args, flags...)
}
add(c.runtimeArgs...)
if !cpt.Exit {
add("--leave-running")
}
if cpt.Shell {
add("--shell-job")
}
if cpt.TCP {
add("--tcp-established")
}
if cpt.UnixSockets {
add("--ext-unix-sk")
}
for _, ns := range cpt.EmptyNS {
add("--empty-ns", ns)
}
add(c.id)
out, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("%s: %q", err.Error(), string(out))
}
return err
}
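// Illustrative note (assumed example values): for a checkpoint named "cp1"
// with Exit=false and TCP=true, the invocation assembled above is roughly
//
//	runc checkpoint --image-path <bundle>/checkpoints/cp1 \
//		--work-path <bundle>/checkpoints/cp1/criu.work \
//		--leave-running --tcp-established <container-id>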
func (c *container) DeleteCheckpoint(name string, checkpointDir string) error {
if checkpointDir == "" {
checkpointDir = filepath.Join(c.bundle, "checkpoints")
}
return os.RemoveAll(filepath.Join(checkpointDir, name))
}
func (c *container) Start(checkpointPath string, s Stdio) (Process, error) {
processRoot := filepath.Join(c.root, c.id, InitProcessID)
if err := os.Mkdir(processRoot, 0755); err != nil {
return nil, err
}
cmd := exec.Command(c.shim,
c.id, c.bundle, c.runtime,
)
cmd.Dir = processRoot
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
spec, err := c.readSpec()
if err != nil {
return nil, err
}
config := &processConfig{
checkpoint: checkpointPath,
root: processRoot,
id: InitProcessID,
c: c,
stdio: s,
spec: spec,
processSpec: specs.ProcessSpec(spec.Process),
}
p, err := newProcess(config)
if err != nil {
return nil, err
}
if err := c.createCmd(InitProcessID, cmd, p); err != nil {
return nil, err
}
return p, nil
}
func (c *container) Exec(pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) {
processRoot := filepath.Join(c.root, c.id, pid)
if err := os.Mkdir(processRoot, 0755); err != nil {
return nil, err
}
defer func() {
if err != nil {
c.RemoveProcess(pid)
}
}()
cmd := exec.Command(c.shim,
c.id, c.bundle, c.runtime,
)
cmd.Dir = processRoot
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
spec, err := c.readSpec()
if err != nil {
return nil, err
}
config := &processConfig{
exec: true,
id: pid,
root: processRoot,
c: c,
processSpec: pspec,
spec: spec,
stdio: s,
}
p, err := newProcess(config)
if err != nil {
return nil, err
}
if err := c.createCmd(pid, cmd, p); err != nil {
return nil, err
}
return p, nil
}
func (c *container) createCmd(pid string, cmd *exec.Cmd, p *process) error {
p.cmd = cmd
if err := cmd.Start(); err != nil {
close(p.cmdDoneCh)
if exErr, ok := err.(*exec.Error); ok {
if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist {
return fmt.Errorf("%s not installed on system", c.shim)
}
}
return err
}
	// Defer the reaper goroutine so it only starts once waitForCreate
	// below has returned, i.e. after the shim's pid file has been written
defer func() {
go func() {
err := p.cmd.Wait()
if err == nil {
p.cmdSuccess = true
}
if same, err := p.isSameProcess(); same && p.pid > 0 {
// The process changed its PR_SET_PDEATHSIG, so force
// kill it
logrus.Infof("containerd: %s:%s (pid %v) has become an orphan, killing it", p.container.id, p.id, p.pid)
err = unix.Kill(p.pid, syscall.SIGKILL)
if err != nil && err != syscall.ESRCH {
logrus.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
} else {
for {
err = unix.Kill(p.pid, 0)
if err != nil {
break
}
time.Sleep(5 * time.Millisecond)
}
}
}
close(p.cmdDoneCh)
}()
}()
if err := c.waitForCreate(p, cmd); err != nil {
return err
}
c.processes[pid] = p
return nil
}
func hostIDFromMap(id uint32, mp []ocs.IDMapping) int {
for _, m := range mp {
if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) {
return int(m.HostID + (id - m.ContainerID))
}
}
return 0
}
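// Illustrative sketch (not part of the original file): with a user
// namespace mapping of container IDs 0-9999 onto host IDs starting at
// 100000 (assumed values), container root resolves to host uid 100000.
func exampleHostIDFromMap() {
	mp := []ocs.IDMapping{{ContainerID: 0, HostID: 100000, Size: 10000}}
	fmt.Println(hostIDFromMap(0, mp))     // 100000
	fmt.Println(hostIDFromMap(1000, mp))  // 101000
	fmt.Println(hostIDFromMap(20000, mp)) // 0, no mapping matches
}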
func (c *container) Pids() ([]int, error) {
args := c.runtimeArgs
args = append(args, "ps", "--format=json", c.id)
out, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("%s: %q", err.Error(), out)
}
var pids []int
if err := json.Unmarshal(out, &pids); err != nil {
return nil, err
}
return pids, nil
}
func (c *container) Stats() (*Stat, error) {
now := time.Now()
args := c.runtimeArgs
args = append(args, "events", "--stats", c.id)
out, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("%s: %q", err.Error(), out)
}
s := struct {
Data *Stat `json:"data"`
}{}
if err := json.Unmarshal(out, &s); err != nil {
return nil, err
}
s.Data.Timestamp = now
return s.Data, nil
}
// Status implements the runtime Container interface.
func (c *container) Status() (State, error) {
args := c.runtimeArgs
args = append(args, "state", c.id)
out, err := exec.Command(c.runtime, args...).CombinedOutput()
if err != nil {
return "", fmt.Errorf("%s: %q", err.Error(), out)
}
// We only require the runtime json output to have a top level Status field.
var s struct {
Status State `json:"status"`
}
if err := json.Unmarshal(out, &s); err != nil {
return "", err
}
return s.Status, nil
}
func (c *container) writeEventFD(root string, cfd, efd int) error {
f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
return err
}
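// Illustrative note: this follows the cgroupv1 event contract, where
// writing "<eventfd> <fd of memory.oom_control>" to cgroup.event_control
// arms the eventfd so that it becomes readable whenever the kernel records
// an OOM in that cgroup; OOM() relies on this to surface notifications.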
type waitArgs struct {
pid int
err error
}
func (c *container) waitForCreate(p *process, cmd *exec.Cmd) error {
wc := make(chan error, 1)
go func() {
for {
if _, err := p.getPidFromFile(); err != nil {
if os.IsNotExist(err) || err == errInvalidPidInt {
alive, err := isAlive(cmd)
if err != nil {
wc <- err
return
}
if !alive {
					// runc could have failed to run the container, so let's get the error
					// out of the logs, or the shim itself could have encountered an error
messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
if err != nil {
wc <- err
return
}
for _, m := range messages {
if m.Level == "error" {
wc <- fmt.Errorf("shim error: %v", m.Msg)
return
}
}
// no errors reported back from shim, check for runc/runtime errors
messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
if err != nil {
if os.IsNotExist(err) {
err = ErrContainerNotStarted
}
wc <- err
return
}
for _, m := range messages {
if m.Level == "error" {
wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
return
}
}
wc <- ErrContainerNotStarted
return
}
time.Sleep(15 * time.Millisecond)
continue
}
wc <- err
return
}
// the pid file was read successfully
wc <- nil
return
}
}()
select {
case err := <-wc:
if err != nil {
return err
}
err = p.saveStartTime()
if err != nil {
logrus.Warnf("containerd: unable to save %s:%s starttime: %v", p.container.id, p.id, err)
}
return nil
case <-time.After(c.timeout):
cmd.Process.Kill()
cmd.Wait()
return ErrContainerStartTimeout
}
}
// isAlive checks if the shim that launched the container is still alive
func isAlive(cmd *exec.Cmd) (bool, error) {
if _, err := syscall.Wait4(cmd.Process.Pid, nil, syscall.WNOHANG, nil); err == nil {
return true, nil
}
if err := syscall.Kill(cmd.Process.Pid, 0); err != nil {
if err == syscall.ESRCH {
return false, nil
}
return false, err
}
return true, nil
}
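// Illustrative note: Wait4 with WNOHANG returns without error while the
// shim is still our child, and the Kill(pid, 0) probe covers a shim that
// has been reparented away from us; ESRCH from that probe means the
// process is truly gone.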
type oom struct {
id string
root string
control *os.File
eventfd int
}
func (o *oom) ContainerID() string {
return o.id
}
func (o *oom) FD() int {
return o.eventfd
}
func (o *oom) Flush() error {
buf := make([]byte, 8)
_, err := syscall.Read(o.eventfd, buf)
return err
}
func (o *oom) Removed() bool {
_, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control"))
return os.IsNotExist(err)
}
func (o *oom) Close() error {
err := syscall.Close(o.eventfd)
if cerr := o.control.Close(); err == nil {
err = cerr
}
return err
}
type message struct {
Level string `json:"level"`
Msg string `json:"msg"`
}
func readLogMessages(path string) ([]message, error) {
var out []message
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
dec := json.NewDecoder(f)
for {
var m message
if err := dec.Decode(&m); err != nil {
if err == io.EOF {
break
}
return nil, err
}
out = append(out, m)
}
return out, nil
}

View file

@ -1,174 +0,0 @@
package runtime
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"github.com/docker/containerd/specs"
ocs "github.com/opencontainers/runtime-spec/specs-go"
)
func findCgroupMountpointAndRoot(pid int, subsystem string) (string, string, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return "", "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
fields := strings.Split(txt, " ")
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if opt == subsystem {
return fields[4], fields[3], nil
}
}
}
if err := scanner.Err(); err != nil {
return "", "", err
}
return "", "", fmt.Errorf("cgroup path for %s not found", subsystem)
}
func parseCgroupFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
s := bufio.NewScanner(f)
cgroups := make(map[string]string)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
text := s.Text()
parts := strings.Split(text, ":")
for _, subs := range strings.Split(parts[1], ",") {
cgroups[subs] = parts[2]
}
}
return cgroups, nil
}
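// Illustrative sketch (assumed sample input): a /proc/<pid>/cgroup line
// such as
//
//	4:memory:/docker/abc123
//
// yields cgroups["memory"] == "/docker/abc123", and a line listing several
// subsystems ("3:cpu,cpuacct:/...") fans out to one map entry per
// subsystem.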
func (c *container) OOM() (OOM, error) {
p := c.processes[InitProcessID]
if p == nil {
return nil, fmt.Errorf("no init process found")
}
mountpoint, hostRoot, err := findCgroupMountpointAndRoot(os.Getpid(), "memory")
if err != nil {
return nil, err
}
cgroups, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", p.pid))
if err != nil {
return nil, err
}
root, ok := cgroups["memory"]
if !ok {
return nil, fmt.Errorf("no memory cgroup for container %s", c.ID())
}
	// Take care of the case where we're running inside a container
	// ourselves
root = strings.TrimPrefix(root, hostRoot)
	return c.getMemoryEventFD(filepath.Join(mountpoint, root))
}
func u64Ptr(i uint64) *uint64 { return &i }
func (c *container) UpdateResources(r *Resource) error {
sr := ocs.Resources{
Memory: &ocs.Memory{
Limit: u64Ptr(uint64(r.Memory)),
Reservation: u64Ptr(uint64(r.MemoryReservation)),
Swap: u64Ptr(uint64(r.MemorySwap)),
Kernel: u64Ptr(uint64(r.KernelMemory)),
KernelTCP: u64Ptr(uint64(r.KernelTCPMemory)),
},
CPU: &ocs.CPU{
Shares: u64Ptr(uint64(r.CPUShares)),
Quota: u64Ptr(uint64(r.CPUQuota)),
Period: u64Ptr(uint64(r.CPUPeriod)),
Cpus: &r.CpusetCpus,
Mems: &r.CpusetMems,
},
BlockIO: &ocs.BlockIO{
Weight: &r.BlkioWeight,
},
}
srStr := bytes.NewBuffer(nil)
if err := json.NewEncoder(srStr).Encode(&sr); err != nil {
return err
}
args := c.runtimeArgs
args = append(args, "update", "-r", "-", c.id)
cmd := exec.Command(c.runtime, args...)
cmd.Stdin = srStr
b, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf(string(b))
}
return nil
}
func getRootIDs(s *specs.Spec) (int, int, error) {
if s == nil {
return 0, 0, nil
}
var hasUserns bool
for _, ns := range s.Linux.Namespaces {
if ns.Type == ocs.UserNamespace {
hasUserns = true
break
}
}
if !hasUserns {
return 0, 0, nil
}
uid := hostIDFromMap(0, s.Linux.UIDMappings)
gid := hostIDFromMap(0, s.Linux.GIDMappings)
return uid, gid, nil
}
func (c *container) getMemoryEventFD(root string) (*oom, error) {
f, err := os.Open(filepath.Join(root, "memory.oom_control"))
if err != nil {
return nil, err
}
fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
if serr != 0 {
f.Close()
return nil, serr
}
if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil {
syscall.Close(int(fd))
f.Close()
return nil, err
}
return &oom{
root: root,
id: c.id,
eventfd: int(fd),
control: f,
}, nil
}

View file

@ -1,19 +0,0 @@
package runtime
import (
"errors"
"github.com/docker/containerd/specs"
)
func (c *container) OOM() (OOM, error) {
return nil, errors.New("runtime OOM() not implemented on Solaris")
}
func (c *container) UpdateResources(r *Resource) error {
return errors.New("runtime UpdateResources() not implemented on Solaris")
}
func getRootIDs(s *specs.Spec) (int, int, error) {
return 0, 0, errors.New("runtime getRootIDs() not implemented on Solaris")
}

View file

@ -1,467 +0,0 @@
package runtime
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/specs"
"golang.org/x/sys/unix"
)
// Process holds the operation allowed on a container's process
type Process interface {
io.Closer
// ID of the process.
// This is either "init" when it is the container's init process or
// it is a user provided id for the process similar to the container id
ID() string
// Start unblocks the associated container init process.
// This should only be called on the process with ID "init"
Start() error
CloseStdin() error
Resize(int, int) error
	// FD returns the fd that provides an event when the process exits
FD() int
// ExitStatus returns the exit status of the process or an error if it
// has not exited
ExitStatus() (uint32, error)
// Spec returns the process spec that created the process
Spec() specs.ProcessSpec
// Signal sends the provided signal to the process
Signal(os.Signal) error
// Container returns the container that the process belongs to
Container() Container
// Stdio of the container
Stdio() Stdio
// SystemPid is the pid on the system
SystemPid() int
// State returns if the process is running or not
State() State
	// Wait reaps the shim process if available
Wait()
}
type processConfig struct {
id string
root string
processSpec specs.ProcessSpec
spec *specs.Spec
c *container
stdio Stdio
exec bool
checkpoint string
}
func newProcess(config *processConfig) (*process, error) {
p := &process{
root: config.root,
id: config.id,
container: config.c,
spec: config.processSpec,
stdio: config.stdio,
cmdDoneCh: make(chan struct{}),
state: Running,
}
uid, gid, err := getRootIDs(config.spec)
if err != nil {
return nil, err
}
f, err := os.Create(filepath.Join(config.root, "process.json"))
if err != nil {
return nil, err
}
defer f.Close()
ps := ProcessState{
ProcessSpec: config.processSpec,
Exec: config.exec,
PlatformProcessState: PlatformProcessState{
Checkpoint: config.checkpoint,
RootUID: uid,
RootGID: gid,
},
Stdin: config.stdio.Stdin,
Stdout: config.stdio.Stdout,
Stderr: config.stdio.Stderr,
RuntimeArgs: config.c.runtimeArgs,
NoPivotRoot: config.c.noPivotRoot,
}
if err := json.NewEncoder(f).Encode(ps); err != nil {
return nil, err
}
exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
if err != nil {
return nil, err
}
control, err := getControlPipe(filepath.Join(config.root, ControlFile))
if err != nil {
return nil, err
}
p.exitPipe = exit
p.controlPipe = control
return p, nil
}
func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
p := &process{
root: root,
id: id,
container: c,
spec: s.ProcessSpec,
stdio: Stdio{
Stdin: s.Stdin,
Stdout: s.Stdout,
Stderr: s.Stderr,
},
state: Stopped,
}
startTime, err := ioutil.ReadFile(filepath.Join(p.root, StartTimeFile))
if err != nil && !os.IsNotExist(err) {
return nil, err
}
p.startTime = string(startTime)
if _, err := p.getPidFromFile(); err != nil {
return nil, err
}
if _, err := p.ExitStatus(); err != nil {
if err == ErrProcessNotExited {
exit, err := getExitPipe(filepath.Join(root, ExitFile))
if err != nil {
return nil, err
}
p.exitPipe = exit
control, err := getControlPipe(filepath.Join(root, ControlFile))
if err != nil {
return nil, err
}
p.controlPipe = control
p.state = Running
return p, nil
}
return nil, err
}
return p, nil
}
func readProcStatField(pid int, field int) (string, error) {
data, err := ioutil.ReadFile(filepath.Join(string(filepath.Separator), "proc", strconv.Itoa(pid), "stat"))
if err != nil {
return "", err
}
if field > 2 {
		// First, split out the name since it could contain spaces.
parts := strings.Split(string(data), ") ")
		// Now split out the rest; we end up with 2 fields fewer
parts = strings.Split(parts[1], " ")
		return parts[field-2-1], nil // field count starts at 1 in the manual
}
parts := strings.Split(string(data), " (")
if field == 1 {
return parts[0], nil
}
parts = strings.Split(parts[1], ") ")
return parts[0], nil
}
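// Illustrative note (assumed sample line): /proc/<pid>/stat looks like
//
//	1234 (some name) S 1 ...
//
// Field 1 is the pid and field 2 the parenthesized comm, which may itself
// contain spaces; that is why fields beyond 2 are located by first
// splitting on ") ". Callers in this file use field 4 (ppid) and field 22
// (starttime).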
type process struct {
root string
id string
pid int
exitPipe *os.File
controlPipe *os.File
container *container
spec specs.ProcessSpec
stdio Stdio
cmd *exec.Cmd
cmdSuccess bool
cmdDoneCh chan struct{}
state State
stateLock sync.Mutex
startTime string
}
func (p *process) ID() string {
return p.id
}
func (p *process) Container() Container {
return p.container
}
func (p *process) SystemPid() int {
return p.pid
}
// FD returns the fd of the exit pipe
func (p *process) FD() int {
return int(p.exitPipe.Fd())
}
func (p *process) CloseStdin() error {
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
return err
}
func (p *process) Resize(w, h int) error {
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
return err
}
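// Illustrative note: CloseStdin and Resize speak the shim's control-pipe
// protocol as used above, one "<msg> <w> <h>" line per request, where msg
// 0 asks the shim to close stdin (width and height are ignored) and msg 1
// asks it to resize the tty to w x h.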
func (p *process) updateExitStatusFile(status uint32) (uint32, error) {
p.stateLock.Lock()
p.state = Stopped
p.stateLock.Unlock()
	err := ioutil.WriteFile(filepath.Join(p.root, ExitStatusFile), []byte(fmt.Sprintf("%d", status)), 0644)
return status, err
}
func (p *process) handleSigkilledShim(rst uint32, rerr error) (uint32, error) {
if p.cmd == nil || p.cmd.Process == nil {
e := unix.Kill(p.pid, 0)
if e == syscall.ESRCH {
logrus.Warnf("containerd: %s:%s (pid %d) does not exist", p.container.id, p.id, p.pid)
			// The process died while containerd was down (probably from
			// SIGKILL, but there's no way to be sure)
return p.updateExitStatusFile(UnknownStatus)
}
// If it's not the same process, just mark it stopped and set
// the status to the UnknownStatus value (i.e. 255)
if same, err := p.isSameProcess(); !same {
logrus.Warnf("containerd: %s:%s (pid %d) is not the same process anymore (%v)", p.container.id, p.id, p.pid, err)
// Create the file so we get the exit event generated once monitor kicks in
// without having to go through all this process again
return p.updateExitStatusFile(UnknownStatus)
}
ppid, err := readProcStatField(p.pid, 4)
if err != nil {
return rst, fmt.Errorf("could not check process ppid: %v (%v)", err, rerr)
}
if ppid == "1" {
logrus.Warnf("containerd: %s:%s shim died, killing associated process", p.container.id, p.id)
			if err := unix.Kill(p.pid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
return UnknownStatus, fmt.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
}
// wait for the process to die
for {
e := unix.Kill(p.pid, 0)
if e == syscall.ESRCH {
break
}
time.Sleep(5 * time.Millisecond)
}
// Create the file so we get the exit event generated once monitor kicks in
// without having to go through all this process again
return p.updateExitStatusFile(128 + uint32(syscall.SIGKILL))
}
return rst, rerr
}
	// It's possible that the shim was SIGKILL'ed
e := unix.Kill(p.cmd.Process.Pid, 0)
if e != syscall.ESRCH {
return rst, rerr
}
// Ensure we got the shim ProcessState
<-p.cmdDoneCh
shimStatus := p.cmd.ProcessState.Sys().(syscall.WaitStatus)
if shimStatus.Signaled() && shimStatus.Signal() == syscall.SIGKILL {
logrus.Debugf("containerd: ExitStatus(container: %s, process: %s): shim was SIGKILL'ed reaping its child with pid %d", p.container.id, p.id, p.pid)
rerr = nil
rst = 128 + uint32(shimStatus.Signal())
p.stateLock.Lock()
p.state = Stopped
p.stateLock.Unlock()
}
return rst, rerr
}
func (p *process) ExitStatus() (rst uint32, rerr error) {
data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
defer func() {
if rerr != nil {
rst, rerr = p.handleSigkilledShim(rst, rerr)
}
}()
if err != nil {
if os.IsNotExist(err) {
return UnknownStatus, ErrProcessNotExited
}
return UnknownStatus, err
}
if len(data) == 0 {
return UnknownStatus, ErrProcessNotExited
}
p.stateLock.Lock()
p.state = Stopped
p.stateLock.Unlock()
i, err := strconv.ParseUint(string(data), 10, 32)
return uint32(i), err
}
func (p *process) Spec() specs.ProcessSpec {
return p.spec
}
func (p *process) Stdio() Stdio {
return p.stdio
}
// Close closes any open files and/or resources on the process
func (p *process) Close() error {
err := p.exitPipe.Close()
if cerr := p.controlPipe.Close(); err == nil {
err = cerr
}
return err
}
func (p *process) State() State {
p.stateLock.Lock()
defer p.stateLock.Unlock()
return p.state
}
func (p *process) getPidFromFile() (int, error) {
data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
if err != nil {
return -1, err
}
i, err := strconv.Atoi(string(data))
if err != nil {
return -1, errInvalidPidInt
}
p.pid = i
return i, nil
}
func (p *process) readStartTime() (string, error) {
return readProcStatField(p.pid, 22)
}
func (p *process) saveStartTime() error {
startTime, err := p.readStartTime()
if err != nil {
return err
}
p.startTime = startTime
return ioutil.WriteFile(filepath.Join(p.root, StartTimeFile), []byte(startTime), 0644)
}
func (p *process) isSameProcess() (bool, error) {
// for backward compat assume it's the same if startTime wasn't set
if p.startTime == "" {
return true, nil
}
if p.pid == 0 {
_, err := p.getPidFromFile()
if err != nil {
return false, err
}
}
startTime, err := p.readStartTime()
if err != nil {
return false, err
}
return startTime == p.startTime, nil
}
// Wait will reap the shim process
func (p *process) Wait() {
if p.cmdDoneCh != nil {
<-p.cmdDoneCh
}
}
func getExitPipe(path string) (*os.File, error) {
if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
return nil, err
}
// add NONBLOCK in case the other side has already closed or else
// this function would never return
return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
}
func getControlPipe(path string) (*os.File, error) {
if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
return nil, err
}
return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
}
// Signal sends the provided signal to the process
func (p *process) Signal(s os.Signal) error {
return syscall.Kill(p.pid, s.(syscall.Signal))
}
// Start unblocks the associated container init process.
// This should only be called on the process with ID "init"
func (p *process) Start() error {
if p.ID() == InitProcessID {
var (
errC = make(chan error, 1)
args = append(p.container.runtimeArgs, "start", p.container.id)
cmd = exec.Command(p.container.runtime, args...)
)
go func() {
out, err := cmd.CombinedOutput()
if err != nil {
errC <- fmt.Errorf("%s: %q", err.Error(), out)
}
errC <- nil
}()
select {
case err := <-errC:
if err != nil {
return err
}
case <-p.cmdDoneCh:
if !p.cmdSuccess {
if cmd.Process != nil {
cmd.Process.Kill()
}
cmd.Wait()
return ErrShimExited
}
err := <-errC
if err != nil {
return err
}
}
}
return nil
}

View file

@ -1,130 +0,0 @@
package runtime
import (
"errors"
"time"
"github.com/docker/containerd/specs"
)
var (
// ErrContainerExited is returned when access to an exited
// container is attempted
ErrContainerExited = errors.New("containerd: container has exited")
// ErrProcessNotExited is returned when trying to retrieve the exit
// status of an alive process
ErrProcessNotExited = errors.New("containerd: process has not exited")
// ErrContainerNotStarted is returned when a container fails to
// start without error from the shim or the OCI runtime
ErrContainerNotStarted = errors.New("containerd: container not started")
// ErrContainerStartTimeout is returned if a container takes too
// long to start
ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
	// ErrShimExited is returned if the shim or the container's init process
// exits before completing
ErrShimExited = errors.New("containerd: shim exited before container process was started")
errNoPidFile = errors.New("containerd: no process pid file found")
errInvalidPidInt = errors.New("containerd: process pid is invalid")
errNotImplemented = errors.New("containerd: not implemented")
)
const (
// ExitFile holds the name of the pipe used to monitor process
// exit
ExitFile = "exit"
// ExitStatusFile holds the name of the file where the container
// exit code is to be written
ExitStatusFile = "exitStatus"
// StateFile holds the name of the file where the container state
// is written
StateFile = "state.json"
// ControlFile holds the name of the pipe used to control the shim
ControlFile = "control"
	// InitProcessID holds the special ID used for the container's init
	// process
InitProcessID = "init"
// StartTimeFile holds the name of the file in which the process
// start time is saved
StartTimeFile = "starttime"
// UnknownStatus is the value returned when a process exit
// status cannot be determined
UnknownStatus = 255
)
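// Note: a process killed by signal N is reported with the shell convention
// of exit status 128+N (137 for SIGKILL, 143 for SIGTERM), which is what
// the integration tests above assert on.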
// Checkpoint holds information regarding a container checkpoint
type Checkpoint struct {
// Timestamp is the time that checkpoint happened
Created time.Time `json:"created"`
// Name is the name of the checkpoint
Name string `json:"name"`
// TCP checkpoints open tcp connections
TCP bool `json:"tcp"`
// UnixSockets persists unix sockets in the checkpoint
UnixSockets bool `json:"unixSockets"`
// Shell persists tty sessions in the checkpoint
Shell bool `json:"shell"`
// Exit exits the container after the checkpoint is finished
Exit bool `json:"exit"`
// EmptyNS tells CRIU to omit a specified namespace
EmptyNS []string `json:"emptyNS,omitempty"`
}
// PlatformProcessState contains platform-specific fields in the ProcessState structure
type PlatformProcessState struct {
Checkpoint string `json:"checkpoint"`
RootUID int `json:"rootUID"`
RootGID int `json:"rootGID"`
}
// State represents a container state
type State string
// Resource regroups the various container limits that can be updated
type Resource struct {
CPUShares int64
BlkioWeight uint16
CPUPeriod int64
CPUQuota int64
CpusetCpus string
CpusetMems string
KernelMemory int64
KernelTCPMemory int64
Memory int64
MemoryReservation int64
MemorySwap int64
}
// Possible container states
const (
Paused = State("paused")
Stopped = State("stopped")
Running = State("running")
)
type state struct {
Bundle string `json:"bundle"`
Labels []string `json:"labels"`
Stdin string `json:"stdin"`
Stdout string `json:"stdout"`
Stderr string `json:"stderr"`
Runtime string `json:"runtime"`
RuntimeArgs []string `json:"runtimeArgs"`
Shim string `json:"shim"`
NoPivotRoot bool `json:"noPivotRoot"`
}
// ProcessState holds the process OCI specs along with various fields
// required by containerd
type ProcessState struct {
specs.ProcessSpec
Exec bool `json:"exec"`
Stdin string `json:"containerdStdin"`
Stdout string `json:"containerdStdout"`
Stderr string `json:"containerdStderr"`
RuntimeArgs []string `json:"runtimeArgs"`
NoPivotRoot bool `json:"noPivotRoot"`
PlatformProcessState
}
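// Illustrative sketch of a serialized process.json (values assumed; the
// embedded ProcessSpec contributes the OCI process fields such as "args"):
//
//	{
//	  "args": ["top"],
//	  "exec": false,
//	  "containerdStdin": "/dev/null",
//	  "containerdStdout": "/tmp/io/out",
//	  "containerdStderr": "/tmp/io/err",
//	  "runtimeArgs": null,
//	  "noPivotRoot": false,
//	  "checkpoint": "",
//	  "rootUID": 0,
//	  "rootGID": 0
//	}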

View file

@ -1,180 +0,0 @@
package runtime
import (
"flag"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"syscall"
"testing"
"time"
utils "github.com/docker/containerd/testutils"
)
var (
devNull = "/dev/null"
stdin io.WriteCloser
runtimeTool = flag.String("runtime", "runc", "Runtime to use for this test")
)
// Create containerd state and oci bundles directory
func setup() error {
if err := os.MkdirAll(utils.StateDir, 0755); err != nil {
return err
}
if err := os.MkdirAll(utils.BundlesRoot, 0755); err != nil {
return err
}
return nil
}
// Creates the bundleDir with rootfs, io fifo dir and a default spec.
// On success, returns the bundlePath
func setupBundle(bundleName string) (string, error) {
bundlePath := filepath.Join(utils.BundlesRoot, bundleName)
if err := os.MkdirAll(bundlePath, 0755); err != nil {
fmt.Println("Unable to create bundlePath due to ", err)
return "", err
}
io := filepath.Join(bundlePath, "io")
if err := os.MkdirAll(io, 0755); err != nil {
fmt.Println("Unable to create io dir due to ", err)
return "", err
}
if err := utils.GenerateReferenceSpecs(bundlePath); err != nil {
fmt.Println("Unable to generate OCI reference spec: ", err)
return "", err
}
if err := utils.CreateBusyboxBundle(bundleName); err != nil {
fmt.Println("CreateBusyboxBundle error: ", err)
return "", err
}
return bundlePath, nil
}
func setupStdio(cwd string, bundlePath string, bundleName string) (Stdio, error) {
s := NewStdio(devNull, devNull, devNull)
pid := "init"
for stdName, stdPath := range map[string]*string{
"stdin": &s.Stdin,
"stdout": &s.Stdout,
"stderr": &s.Stderr,
} {
*stdPath = filepath.Join(cwd, bundlePath, "io", bundleName+"-"+pid+"-"+stdName)
if err := syscall.Mkfifo(*stdPath, 0755); err != nil && !os.IsExist(err) {
fmt.Println("Mkfifo error: ", err)
return s, err
}
}
err := attachStdio(s)
if err != nil {
fmt.Println("attachStdio error: ", err)
return s, err
}
return s, nil
}
func attachStdio(s Stdio) error {
stdinf, err := os.OpenFile(s.Stdin, syscall.O_RDWR, 0)
if err != nil {
return err
}
stdin = stdinf
stdoutf, err := os.OpenFile(s.Stdout, syscall.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(os.Stdout, stdoutf)
stderrf, err := os.OpenFile(s.Stderr, syscall.O_RDWR, 0)
if err != nil {
return err
}
go io.Copy(os.Stderr, stderrf)
return nil
}
func teardownBundle(bundleName string) {
containerRoot := filepath.Join(utils.StateDir, bundleName)
os.RemoveAll(containerRoot)
bundlePath := filepath.Join(utils.BundlesRoot, bundleName)
os.RemoveAll(bundlePath)
}
// Remove containerd state and oci bundles directory
func teardown() {
os.RemoveAll(utils.StateDir)
os.RemoveAll(utils.BundlesRoot)
}
func BenchmarkBusyboxSh(b *testing.B) {
bundleName := "busybox-sh"
wd := utils.GetTestOutDir()
if err := os.Chdir(wd); err != nil {
b.Fatalf("Could not change working directory: %v", err)
}
if err := setup(); err != nil {
b.Fatalf("Error setting up test: %v", err)
}
defer teardown()
for n := 0; n < b.N; n++ {
bundlePath, err := setupBundle(bundleName)
if err != nil {
return
}
s, err := setupStdio(wd, bundlePath, bundleName)
if err != nil {
return
}
c, err := New(ContainerOpts{
Root: utils.StateDir,
ID: bundleName,
Bundle: filepath.Join(wd, bundlePath),
Runtime: *runtimeTool,
Shim: "containerd-shim",
Timeout: 15 * time.Second,
})
if err != nil {
b.Fatalf("Error creating a New container: ", err)
}
benchmarkStartContainer(b, c, s, bundleName)
teardownBundle(bundleName)
}
}
func benchmarkStartContainer(b *testing.B, c Container, s Stdio, bundleName string) {
p, err := c.Start("", s)
if err != nil {
b.Fatalf("Error starting container %v", err)
}
kill := exec.Command(c.Runtime(), "kill", bundleName, "KILL")
kill.Run()
p.Wait()
c.Delete()
// wait for kill to finish. selected wait time is arbitrary
time.Sleep(500 * time.Millisecond)
}

View file

@ -1,87 +0,0 @@
package runtime
import "time"
// Stat holds a container statistics
type Stat struct {
	// Timestamp is the time that the statistics were collected
Timestamp time.Time
CPU CPU `json:"cpu"`
Memory Memory `json:"memory"`
Pids Pids `json:"pids"`
Blkio Blkio `json:"blkio"`
Hugetlb map[string]Hugetlb `json:"hugetlb"`
}
// Hugetlb holds information regarding a container huge tlb usage
type Hugetlb struct {
Usage uint64 `json:"usage,omitempty"`
Max uint64 `json:"max,omitempty"`
Failcnt uint64 `json:"failcnt"`
}
// BlkioEntry represents a single record for a Blkio stat
type BlkioEntry struct {
Major uint64 `json:"major,omitempty"`
Minor uint64 `json:"minor,omitempty"`
Op string `json:"op,omitempty"`
Value uint64 `json:"value,omitempty"`
}
// Blkio regroups all the Blkio related stats
type Blkio struct {
IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"`
IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"`
IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"`
IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"`
SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"`
}
// Pids holds the stat of the pid usage of the machine
type Pids struct {
Current uint64 `json:"current,omitempty"`
Limit uint64 `json:"limit,omitempty"`
}
// Throttling holds a cpu throttling information
type Throttling struct {
Periods uint64 `json:"periods,omitempty"`
ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
ThrottledTime uint64 `json:"throttledTime,omitempty"`
}
// CPUUsage holds information regarding cpu usage
type CPUUsage struct {
// Units: nanoseconds.
Total uint64 `json:"total,omitempty"`
Percpu []uint64 `json:"percpu,omitempty"`
Kernel uint64 `json:"kernel"`
User uint64 `json:"user"`
}
// CPU regroups both a CPU usage and throttling information
type CPU struct {
Usage CPUUsage `json:"usage,omitempty"`
Throttling Throttling `json:"throttling,omitempty"`
}
// MemoryEntry regroups statistic about a given type of memory
type MemoryEntry struct {
Limit uint64 `json:"limit"`
Usage uint64 `json:"usage,omitempty"`
Max uint64 `json:"max,omitempty"`
Failcnt uint64 `json:"failcnt"`
}
// Memory holds information regarding the different type of memories available
type Memory struct {
Cache uint64 `json:"cache,omitempty"`
Usage MemoryEntry `json:"usage,omitempty"`
Swap MemoryEntry `json:"swap,omitempty"`
Kernel MemoryEntry `json:"kernel,omitempty"`
KernelTCP MemoryEntry `json:"kernelTCP,omitempty"`
Raw map[string]uint64 `json:"raw,omitempty"`
}

View file

@ -1,12 +0,0 @@
package specs
import oci "github.com/opencontainers/runtime-spec/specs-go"
type (
// ProcessSpec aliases the platform process specs
ProcessSpec oci.Process
// Spec aliases the platform oci spec
Spec oci.Spec
// Rlimit aliases the platform resource limit
Rlimit oci.Rlimit
)

View file

@ -1,8 +0,0 @@
package specs
import ocs "github.com/opencontainers/specs/specs-go"
type (
ProcessSpec ocs.Process
Spec ocs.Spec
)

View file

@ -1,43 +0,0 @@
package supervisor
import (
"time"
"github.com/docker/containerd/runtime"
"github.com/docker/containerd/specs"
)
// AddProcessTask holds everything necessary to add a process to a
// container
type AddProcessTask struct {
baseTask
ID string
PID string
Stdout string
Stderr string
Stdin string
ProcessSpec *specs.ProcessSpec
StartResponse chan StartResponse
}
func (s *Supervisor) addProcess(t *AddProcessTask) error {
ci, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
process, err := ci.container.Exec(t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr))
if err != nil {
return err
}
if err := s.monitor.Add(process); err != nil {
return err
}
t.StartResponse <- StartResponse{}
s.notifySubscribers(Event{
Timestamp: time.Now(),
Type: StateStartProcess,
PID: t.PID,
ID: t.ID,
})
return nil
}

View file

@ -1,37 +0,0 @@
// +build !windows
package supervisor
import "github.com/docker/containerd/runtime"
// CreateCheckpointTask holds needed parameters to create a new checkpoint
type CreateCheckpointTask struct {
baseTask
ID string
CheckpointDir string
Checkpoint *runtime.Checkpoint
}
func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir)
}
// DeleteCheckpointTask holds needed parameters to delete a checkpoint
type DeleteCheckpointTask struct {
baseTask
ID string
CheckpointDir string
Checkpoint *runtime.Checkpoint
}
func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
return i.container.DeleteCheckpoint(t.Checkpoint.Name, t.CheckpointDir)
}

View file

@ -1,63 +0,0 @@
package supervisor
import (
"path/filepath"
"github.com/docker/containerd/runtime"
)
// StartTask holds needed parameters to create a new container
type StartTask struct {
baseTask
ID string
BundlePath string
Stdout string
Stderr string
Stdin string
StartResponse chan StartResponse
Labels []string
NoPivotRoot bool
Checkpoint *runtime.Checkpoint
CheckpointDir string
Runtime string
RuntimeArgs []string
}
func (s *Supervisor) start(t *StartTask) error {
rt := s.config.Runtime
rtArgs := s.config.RuntimeArgs
if t.Runtime != "" {
rt = t.Runtime
rtArgs = t.RuntimeArgs
}
container, err := runtime.New(runtime.ContainerOpts{
Root: s.config.StateDir,
ID: t.ID,
Bundle: t.BundlePath,
Runtime: rt,
RuntimeArgs: rtArgs,
Shim: s.config.ShimName,
Labels: t.Labels,
NoPivotRoot: t.NoPivotRoot,
Timeout: s.config.Timeout,
})
if err != nil {
return err
}
s.containers[t.ID] = &containerInfo{
container: container,
}
task := &startTask{
Err: t.ErrorCh(),
Container: container,
StartResponse: t.StartResponse,
Stdin: t.Stdin,
Stdout: t.Stdout,
Stderr: t.Stderr,
}
if t.Checkpoint != nil {
task.CheckpointPath = filepath.Join(t.CheckpointDir, t.Checkpoint.Name)
}
s.startTasks <- task
return errDeferredResponse
}
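
Because start hands the real work to a worker and returns errDeferredResponse, the caller's error arrives only once a worker has tried to start the container. A minimal sketch of that calling pattern, with placeholder FIFO paths and a hypothetical helper name:

package client

import "github.com/docker/containerd/supervisor"

// StartContainer queues a StartTask and waits for a worker to pick it up.
func StartContainer(sv *supervisor.Supervisor, id, bundle string) (supervisor.StartResponse, error) {
	t := &supervisor.StartTask{
		ID:            id,
		BundlePath:    bundle,
		Stdin:         "/run/example/" + id + "/stdin",
		Stdout:        "/run/example/" + id + "/stdout",
		Stderr:        "/run/example/" + id + "/stderr",
		StartResponse: make(chan supervisor.StartResponse, 1),
	}
	sv.SendTask(t)
	// the worker sends the error first, then the StartResponse on success
	if err := <-t.ErrorCh(); err != nil {
		return supervisor.StartResponse{}, err
	}
	return <-t.StartResponse, nil
}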


@ -1,8 +0,0 @@
package supervisor
type platformStartTask struct {
}
// Checkpoint not supported on Solaris
func (task *startTask) setTaskCheckpoint(t *StartTask) {
}


@ -1,44 +0,0 @@
package supervisor
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/runtime"
)
// DeleteTask holds needed parameters to remove a container
type DeleteTask struct {
baseTask
ID string
Status uint32
PID string
NoEvent bool
Process runtime.Process
}
func (s *Supervisor) delete(t *DeleteTask) error {
if i, ok := s.containers[t.ID]; ok {
if err := s.deleteContainer(i.container); err != nil {
logrus.WithField("error", err).Error("containerd: deleting container")
}
if t.Process != nil {
t.Process.Wait()
}
if !t.NoEvent {
s.notifySubscribers(Event{
Type: StateExit,
Timestamp: time.Now(),
ID: t.ID,
Status: t.Status,
PID: t.PID,
})
}
}
return nil
}
func (s *Supervisor) deleteContainer(container runtime.Container) error {
delete(s.containers, container.ID())
return container.Delete()
}


@ -1,28 +0,0 @@
package supervisor
import "errors"
var (
// ErrContainerNotFound is returned when the container ID passed
// for a given operation is invalid
ErrContainerNotFound = errors.New("containerd: container not found")
// ErrProcessNotFound is returned when the process ID passed for
// a given operation is invalid
ErrProcessNotFound = errors.New("containerd: process not found for container")
// ErrUnknownContainerStatus is returned when the container status
// cannot be determined
ErrUnknownContainerStatus = errors.New("containerd: unknown container status")
// ErrUnknownTask is returned when an unknown Task type is
// scheduled (should never happen).
ErrUnknownTask = errors.New("containerd: unknown task type")
// Internal errors
errShutdown = errors.New("containerd: supervisor is shutdown")
errRootNotAbs = errors.New("containerd: rootfs path is not an absolute path")
errNoContainerForPid = errors.New("containerd: pid not registered for any container")
// internal error where the handler will defer to another for the final response
//
// TODO: we could probably do a typed error with another error channel for this to make it
// less like magic
errDeferredResponse = errors.New("containerd: deferred response")
)
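
The TODO above suggests replacing the errDeferredResponse sentinel with a typed error. A minimal in-package sketch of that idea (an assumption, not code that existed in the tree):

package supervisor

// deferredResponse is a hypothetical typed replacement for the
// errDeferredResponse sentinel: the deferring handler carries the
// channel on which the final answer will eventually arrive.
type deferredResponse struct {
	respCh chan error
}

func (d *deferredResponse) Error() string {
	return "containerd: deferred response"
}

handleTask could then branch with a type assertion, `if _, ok := err.(*deferredResponse); ok`, instead of an equality check against the sentinel.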


@ -1,87 +0,0 @@
package supervisor
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/runtime"
)
// ExitTask holds needed parameters to execute the exit task
type ExitTask struct {
baseTask
Process runtime.Process
}
func (s *Supervisor) exit(t *ExitTask) error {
proc := t.Process
status, err := proc.ExitStatus()
if err != nil {
logrus.WithFields(logrus.Fields{
"error": err,
"pid": proc.ID(),
"id": proc.Container().ID(),
"systemPid": proc.SystemPid(),
}).Error("containerd: get exit status")
}
logrus.WithFields(logrus.Fields{
"pid": proc.ID(),
"status": status,
"id": proc.Container().ID(),
"systemPid": proc.SystemPid(),
}).Debug("containerd: process exited")
// if the process is not the init process of the container then
// fire a separate exec exit event for this process
if proc.ID() != runtime.InitProcessID {
ne := &ExecExitTask{
ID: proc.Container().ID(),
PID: proc.ID(),
Status: status,
Process: proc,
}
s.execExit(ne)
return nil
}
container := proc.Container()
ne := &DeleteTask{
ID: container.ID(),
Status: status,
PID: proc.ID(),
Process: proc,
}
s.delete(ne)
return nil
}
// ExecExitTask holds needed parameters to execute the exec exit task
type ExecExitTask struct {
baseTask
ID string
PID string
Status uint32
Process runtime.Process
}
func (s *Supervisor) execExit(t *ExecExitTask) error {
container := t.Process.Container()
// exec process: we remove this process without notifying the main event loop
if err := container.RemoveProcess(t.PID); err != nil {
logrus.WithField("error", err).Error("containerd: find container for pid")
}
// If the exec spawned children that are still using its IO,
// waiting here would block until they exit or close their IO
// descriptors.
// Hence, we use a goroutine to avoid blocking all other operations
go func() {
t.Process.Wait()
s.notifySubscribers(Event{
Timestamp: time.Now(),
ID: t.ID,
Type: StateExit,
PID: t.PID,
Status: t.Status,
})
}()
return nil
}


@ -1,47 +0,0 @@
package supervisor
import "github.com/docker/containerd/runtime"
// GetContainersTask holds needed parameters to retrieve a list of
// containers
type GetContainersTask struct {
baseTask
ID string
GetState func(c runtime.Container) (interface{}, error)
Containers []runtime.Container
States []interface{}
}
func (s *Supervisor) getContainers(t *GetContainersTask) error {
if t.ID != "" {
ci, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
t.Containers = append(t.Containers, ci.container)
if t.GetState != nil {
st, err := t.GetState(ci.container)
if err != nil {
return err
}
t.States = append(t.States, st)
}
return nil
}
for _, ci := range s.containers {
t.Containers = append(t.Containers, ci.container)
if t.GetState != nil {
st, err := t.GetState(ci.container)
if err != nil {
return err
}
t.States = append(t.States, st)
}
}
return nil
}
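
A hedged sketch of invoking this from an API layer; the helper name and the snapshot returned by GetState are illustrative assumptions:

package client

import (
	"github.com/docker/containerd/runtime"
	"github.com/docker/containerd/supervisor"
)

// ListContainers queues a GetContainersTask; an empty id returns all containers.
func ListContainers(sv *supervisor.Supervisor, id string) ([]runtime.Container, error) {
	t := &supervisor.GetContainersTask{
		ID: id,
		GetState: func(c runtime.Container) (interface{}, error) {
			// any caller-defined snapshot works; the ID is the simplest
			return map[string]string{"id": c.ID()}, nil
		},
	}
	sv.SendTask(t)
	if err := <-t.ErrorCh(); err != nil {
		return nil, err
	}
	return t.Containers, nil
}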


@ -1,28 +0,0 @@
// +build !solaris
package supervisor
import "github.com/cloudfoundry/gosigar"
// Machine holds the current machine cpu count and ram size
type Machine struct {
Cpus int
Memory int64
}
// CollectMachineInformation returns information regarding the current
// machine (e.g. CPU count, RAM amount)
func CollectMachineInformation() (Machine, error) {
m := Machine{}
cpu := sigar.CpuList{}
if err := cpu.Get(); err != nil {
return m, err
}
m.Cpus = len(cpu.List)
mem := sigar.Mem{}
if err := mem.Get(); err != nil {
return m, err
}
m.Memory = int64(mem.Total / 1024 / 1024)
return m, nil
}


@ -1,15 +0,0 @@
package supervisor
import (
"errors"
)
type Machine struct {
Cpus int
Memory int64
}
func CollectMachineInformation() (Machine, error) {
m := Machine{}
return m, errors.New("supervisor CollectMachineInformation not implemented on Solaris")
}


@ -1,23 +0,0 @@
package supervisor
import (
"time"
"github.com/Sirupsen/logrus"
)
// OOMTask holds needed parameters to report a container OOM
type OOMTask struct {
baseTask
ID string
}
func (s *Supervisor) oom(t *OOMTask) error {
logrus.WithField("id", t.ID).Debug("containerd: container oom")
s.notifySubscribers(Event{
Timestamp: time.Now(),
ID: t.ID,
Type: StateOOM,
})
return nil
}


@ -1,28 +0,0 @@
package supervisor
import "os"
// SignalTask holds needed parameters to signal a container
type SignalTask struct {
baseTask
ID string
PID string
Signal os.Signal
}
func (s *Supervisor) signal(t *SignalTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
processes, err := i.container.Processes()
if err != nil {
return err
}
for _, p := range processes {
if p.ID() == t.PID {
return p.Signal(t.Signal)
}
}
return ErrProcessNotFound
}


@ -1,27 +0,0 @@
package supervisor
import (
"sort"
"github.com/docker/containerd/runtime"
)
func sortProcesses(p []runtime.Process) {
sort.Sort(&processSorter{p})
}
type processSorter struct {
processes []runtime.Process
}
func (s *processSorter) Len() int {
return len(s.processes)
}
func (s *processSorter) Swap(i, j int) {
s.processes[i], s.processes[j] = s.processes[j], s.processes[i]
}
func (s *processSorter) Less(i, j int) bool {
return s.processes[j].ID() == "init"
}


@ -1,89 +0,0 @@
package supervisor
import (
"flag"
"os"
"sort"
"testing"
"github.com/docker/containerd/runtime"
"github.com/docker/containerd/specs"
)
var (
runtimeTool = flag.String("runtime", "runc", "Runtime to use for this test")
)
type testProcess struct {
id string
}
func (p *testProcess) ID() string {
return p.id
}
func (p *testProcess) Start() error {
return nil
}
func (p *testProcess) CloseStdin() error {
return nil
}
func (p *testProcess) Resize(w, h int) error {
return nil
}
func (p *testProcess) Stdio() runtime.Stdio {
return runtime.Stdio{}
}
func (p *testProcess) SystemPid() int {
return -1
}
func (p *testProcess) ExitFD() int {
return -1
}
func (p *testProcess) ExitStatus() (uint32, error) {
return runtime.UnknownStatus, nil
}
func (p *testProcess) Container() runtime.Container {
return nil
}
func (p *testProcess) Spec() specs.ProcessSpec {
return specs.ProcessSpec{}
}
func (p *testProcess) Signal(os.Signal) error {
return nil
}
func (p *testProcess) Close() error {
return nil
}
func (p *testProcess) State() runtime.State {
return runtime.Running
}
func (p *testProcess) Wait() {
}
func TestSortProcesses(t *testing.T) {
p := []runtime.Process{
&testProcess{"ls"},
&testProcess{"other"},
&testProcess{"init"},
&testProcess{"other2"},
}
s := &processSorter{p}
sort.Sort(s)
if id := p[len(p)-1].ID(); id != "init" {
t.Fatalf("expected init but received %q", id)
}
}


@ -1,28 +0,0 @@
package supervisor
import "github.com/docker/containerd/runtime"
// StatsTask holds needed parameters to retrieve a container's statistics
type StatsTask struct {
baseTask
ID string
Stat chan *runtime.Stat
}
func (s *Supervisor) stats(t *StatsTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
// TODO: use workers for this
go func() {
s, err := i.container.Stats()
if err != nil {
t.ErrorCh() <- err
return
}
t.ErrorCh() <- nil
t.Stat <- s
}()
return errDeferredResponse
}
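
Since stats also returns errDeferredResponse, callers see the same two-step protocol: the error channel answers first, and Stat delivers only after a nil error. A hypothetical wrapper:

package client

import (
	"github.com/docker/containerd/runtime"
	"github.com/docker/containerd/supervisor"
)

// ContainerStats queues a StatsTask and waits for the async result.
func ContainerStats(sv *supervisor.Supervisor, id string) (*runtime.Stat, error) {
	t := &supervisor.StatsTask{
		ID:   id,
		Stat: make(chan *runtime.Stat, 1),
	}
	sv.SendTask(t)
	if err := <-t.ErrorCh(); err != nil {
		return nil, err
	}
	return <-t.Stat, nil
}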


@ -1,310 +0,0 @@
package supervisor
import (
"io/ioutil"
"os"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/monitor"
"github.com/docker/containerd/runtime"
)
const (
defaultBufferSize = 2048 // size of queue in eventloop
)
type Config struct {
StateDir string
Runtime string
ShimName string
RuntimeArgs []string
Timeout time.Duration
EventRetainCount int
}
// New returns an initialized Process supervisor.
func New(c Config) (*Supervisor, error) {
startTasks := make(chan *startTask, 10)
if err := os.MkdirAll(c.StateDir, 0755); err != nil {
return nil, err
}
machine, err := CollectMachineInformation()
if err != nil {
return nil, err
}
m, err := monitor.New()
if err != nil {
return nil, err
}
go m.Run()
s := &Supervisor{
config: c,
containers: make(map[string]*containerInfo),
startTasks: startTasks,
machine: machine,
subscribers: make(map[chan Event]struct{}),
tasks: make(chan Task, defaultBufferSize),
monitor: m,
}
if err := setupEventLog(s, c.EventRetainCount); err != nil {
return nil, err
}
go s.monitorEventHandler()
if err := s.restore(); err != nil {
return nil, err
}
return s, nil
}
// Supervisor represents a container supervisor
type Supervisor struct {
config Config
containers map[string]*containerInfo
startTasks chan *startTask
// we need a lock around the subscribers map only because additions and deletions from
// the map happen via the API, so we cannot otherwise control the concurrency
subscriberLock sync.RWMutex
subscribers map[chan Event]struct{}
machine Machine
tasks chan Task
monitor *monitor.Monitor
eventLog []Event
eventLock sync.Mutex
}
// Stop closes all startTasks and sends a SIGTERM to each container's pid1, then waits for them to
// terminate. After it has handled all the SIGCHLD events it will close the signals chan
// and exit. Stop is a non-blocking call and will return after the containers have been signaled
func (s *Supervisor) Stop() {
// Close the startTasks channel so that no new containers get started
close(s.startTasks)
}
// Close closes any open files in the supervisor but expects that Stop has been
// called so that no more containers are started.
func (s *Supervisor) Close() error {
return nil
}
// Event represents a container event
type Event struct {
ID string `json:"id"`
Type string `json:"type"`
Timestamp time.Time `json:"timestamp"`
PID string `json:"pid,omitempty"`
Status uint32 `json:"status,omitempty"`
}
// Events returns an event channel that external consumers can use to receive updates
// on container events
func (s *Supervisor) Events(from time.Time, storedOnly bool, id string) chan Event {
c := make(chan Event, defaultBufferSize)
if storedOnly {
defer s.Unsubscribe(c)
}
s.subscriberLock.Lock()
defer s.subscriberLock.Unlock()
if !from.IsZero() {
// replay old events
s.eventLock.Lock()
past := s.eventLog[:]
s.eventLock.Unlock()
for _, e := range past {
if e.Timestamp.After(from) {
if id == "" || e.ID == id {
c <- e
}
}
}
}
if storedOnly {
close(c)
} else {
s.subscribers[c] = struct{}{}
}
return c
}
// Unsubscribe removes the provided channel from receiving any more events
func (s *Supervisor) Unsubscribe(sub chan Event) {
s.subscriberLock.Lock()
defer s.subscriberLock.Unlock()
if _, ok := s.subscribers[sub]; ok {
delete(s.subscribers, sub)
close(sub)
}
}
// notifySubscribers will send the provided event to the external subscribers
// of the events channel
func (s *Supervisor) notifySubscribers(e Event) {
s.subscriberLock.RLock()
defer s.subscriberLock.RUnlock()
for sub := range s.subscribers {
// do a non-blocking send for the channel
select {
case sub <- e:
default:
logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber")
}
}
}
// Start is a non-blocking call that runs the supervisor for monitoring container processes and
// executing new containers.
//
// This event loop is the only thing that is allowed to modify state of containers and processes,
// therefore it is safe to do operations in the handlers that modify state of the system or
// state of the Supervisor
func (s *Supervisor) Start() error {
logrus.WithFields(logrus.Fields{
"stateDir": s.config.StateDir,
"runtime": s.config.Runtime,
"runtimeArgs": s.config.RuntimeArgs,
"memory": s.machine.Memory,
"cpus": s.machine.Cpus,
}).Debug("containerd: supervisor running")
go func() {
for i := range s.tasks {
s.handleTask(i)
}
}()
return nil
}
// Machine returns information about the machine on which the
// supervisor is executing.
func (s *Supervisor) Machine() Machine {
return s.machine
}
// SendTask sends the provided task to the supervisor's main event loop
func (s *Supervisor) SendTask(evt Task) {
s.tasks <- evt
}
func (s *Supervisor) monitorEventHandler() {
for e := range s.monitor.Events() {
switch t := e.(type) {
case runtime.Process:
if err := s.monitor.Remove(e); err != nil {
logrus.WithField("error", err).Error("containerd: remove process event FD from monitor")
}
if err := t.Close(); err != nil {
logrus.WithField("error", err).Error("containerd: close process event FD")
}
ev := &ExitTask{
Process: t,
}
s.SendTask(ev)
case runtime.OOM:
if t.Removed() {
if err := s.monitor.Remove(e); err != nil {
logrus.WithField("error", err).Error("containerd: remove oom event FD from monitor")
}
if err := t.Close(); err != nil {
logrus.WithField("error", err).Error("containerd: close oom event FD")
}
// don't send an event on the close of this FD
continue
}
ev := &OOMTask{
ID: t.ContainerID(),
}
s.SendTask(ev)
}
}
}
func (s *Supervisor) restore() error {
dirs, err := ioutil.ReadDir(s.config.StateDir)
if err != nil {
return err
}
for _, d := range dirs {
if !d.IsDir() {
continue
}
id := d.Name()
container, err := runtime.Load(s.config.StateDir, id, s.config.ShimName, s.config.Timeout)
if err != nil {
return err
}
processes, err := container.Processes()
if err != nil {
return err
}
s.containers[id] = &containerInfo{
container: container,
}
oom, err := container.OOM()
if err != nil {
logrus.WithField("error", err).Error("containerd: get oom FD")
}
if err := s.monitor.Add(oom); err != nil && err != runtime.ErrContainerExited {
logrus.WithField("error", err).Error("containerd: notify OOM events")
}
logrus.WithField("id", id).Debug("containerd: container restored")
var exitedProcesses []runtime.Process
for _, p := range processes {
if p.State() == runtime.Running {
if err := s.monitor.Add(p); err != nil {
return err
}
} else {
exitedProcesses = append(exitedProcesses, p)
}
}
if len(exitedProcesses) > 0 {
// sort processes so that init is fired last because that is how the kernel sends the
// exit events
sortProcesses(exitedProcesses)
for _, p := range exitedProcesses {
e := &ExitTask{
Process: p,
}
s.SendTask(e)
}
}
}
return nil
}
func (s *Supervisor) handleTask(i Task) {
var err error
switch t := i.(type) {
case *AddProcessTask:
err = s.addProcess(t)
case *CreateCheckpointTask:
err = s.createCheckpoint(t)
case *DeleteCheckpointTask:
err = s.deleteCheckpoint(t)
case *StartTask:
err = s.start(t)
case *DeleteTask:
err = s.delete(t)
case *ExitTask:
err = s.exit(t)
case *GetContainersTask:
err = s.getContainers(t)
case *SignalTask:
err = s.signal(t)
case *StatsTask:
err = s.stats(t)
case *UpdateTask:
err = s.updateContainer(t)
case *UpdateProcessTask:
err = s.updateProcess(t)
case *OOMTask:
err = s.oom(t)
default:
err = ErrUnknownTask
}
if err != errDeferredResponse {
i.ErrorCh() <- err
close(i.ErrorCh())
}
}
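
Putting New, Start, and Events together, a minimal daemon skeleton might look like the following; the paths are placeholders, and a real daemon would also start workers (see the worker sketch further down) so that StartTasks get serviced:

package main

import (
	"log"
	"time"

	"github.com/docker/containerd/supervisor"
)

func main() {
	sv, err := supervisor.New(supervisor.Config{
		StateDir: "/run/containerd-example",
		Runtime:  "runc",
		ShimName: "containerd-shim",
		Timeout:  15 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := sv.Start(); err != nil {
		log.Fatal(err)
	}
	// stream live events until the subscription is closed
	for e := range sv.Events(time.Time{}, false, "") {
		log.Printf("event type=%s id=%s pid=%s", e.Type, e.ID, e.PID)
	}
}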


@ -1,33 +0,0 @@
package supervisor
import (
"sync"
"github.com/docker/containerd/runtime"
)
// StartResponse is the response containing a started container
type StartResponse struct {
Container runtime.Container
}
// Task executes an action returning an error chan with either nil or
// the error from executing the task
type Task interface {
// ErrorCh returns a channel used to report an error from an async task
ErrorCh() chan error
}
type baseTask struct {
errCh chan error
mu sync.Mutex
}
func (t *baseTask) ErrorCh() chan error {
t.mu.Lock()
defer t.mu.Unlock()
if t.errCh == nil {
t.errCh = make(chan error, 1)
}
return t.errCh
}
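
Because baseTask is unexported, new task types live inside the package. A hypothetical PauseTask shows the shape a new task and its handler would take; it would also need its own case in handleTask:

package supervisor

// PauseTask is an illustrative in-package task: embed baseTask for the
// error channel and add whatever fields the handler needs.
type PauseTask struct {
	baseTask
	ID string
}

func (s *Supervisor) pause(t *PauseTask) error {
	i, ok := s.containers[t.ID]
	if !ok {
		return ErrContainerNotFound
	}
	return i.container.Pause()
}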


@ -1,12 +0,0 @@
package supervisor
// State constants used in Event types
const (
StateStart = "start-container"
StatePause = "pause"
StateResume = "resume"
StateExit = "exit"
StateStartProcess = "start-process"
StateOOM = "oom"
StateLive = "live"
)


@ -1,95 +0,0 @@
package supervisor
import (
"time"
"github.com/docker/containerd/runtime"
)
// UpdateTask holds needed parameters to update a container's resource constraints
type UpdateTask struct {
baseTask
ID string
State runtime.State
Resources *runtime.Resource
}
func (s *Supervisor) updateContainer(t *UpdateTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
container := i.container
if t.State != "" {
switch t.State {
case runtime.Running:
if err := container.Resume(); err != nil {
return err
}
s.notifySubscribers(Event{
ID: t.ID,
Type: StateResume,
Timestamp: time.Now(),
})
case runtime.Paused:
if err := container.Pause(); err != nil {
return err
}
s.notifySubscribers(Event{
ID: t.ID,
Type: StatePause,
Timestamp: time.Now(),
})
default:
return ErrUnknownContainerStatus
}
return nil
}
if t.Resources != nil {
return container.UpdateResources(t.Resources)
}
return nil
}
// UpdateProcessTask holds needed parameters to update a container
// process's terminal size or close its stdin
type UpdateProcessTask struct {
baseTask
ID string
PID string
CloseStdin bool
Width int
Height int
}
func (s *Supervisor) updateProcess(t *UpdateProcessTask) error {
i, ok := s.containers[t.ID]
if !ok {
return ErrContainerNotFound
}
processes, err := i.container.Processes()
if err != nil {
return err
}
var process runtime.Process
for _, p := range processes {
if p.ID() == t.PID {
process = p
break
}
}
if process == nil {
return ErrProcessNotFound
}
if t.CloseStdin {
if err := process.CloseStdin(); err != nil {
return err
}
}
if t.Width > 0 || t.Height > 0 {
if err := process.Resize(t.Width, t.Height); err != nil {
return err
}
}
return nil
}
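
A hedged sketch of driving a terminal resize through this handler from outside the event loop:

package client

import "github.com/docker/containerd/supervisor"

// ResizeTerminal is a hypothetical helper wrapping UpdateProcessTask.
func ResizeTerminal(sv *supervisor.Supervisor, id, pid string, width, height int) error {
	t := &supervisor.UpdateProcessTask{
		ID:     id,
		PID:    pid,
		Width:  width,
		Height: height,
	}
	sv.SendTask(t)
	return <-t.ErrorCh()
}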


@ -1,101 +0,0 @@
package supervisor
import (
"encoding/json"
"io"
"os"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/runtime"
)
type containerInfo struct {
container runtime.Container
}
func setupEventLog(s *Supervisor, retainCount int) error {
if err := readEventLog(s); err != nil {
return err
}
logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events")
events := s.Events(time.Time{}, false, "")
return eventLogger(s, filepath.Join(s.config.StateDir, "events.log"), events, retainCount)
}
func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755)
if err != nil {
return err
}
go func() {
var (
count = len(s.eventLog)
enc = json.NewEncoder(f)
)
for e := range events {
// if we have a specified retain count, make sure we truncate the event
// log if it grows past the specified number of events to keep.
if retainCount > 0 {
if count > retainCount {
logrus.Debug("truncating event log")
// close the log file
if f != nil {
f.Close()
}
slice := retainCount - 1
l := len(s.eventLog)
if slice >= l {
slice = l
}
s.eventLock.Lock()
s.eventLog = s.eventLog[len(s.eventLog)-slice:]
s.eventLock.Unlock()
if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil {
logrus.WithField("error", err).Error("containerd: open event to journal")
continue
}
enc = json.NewEncoder(f)
count = 0
for _, le := range s.eventLog {
if err := enc.Encode(le); err != nil {
logrus.WithField("error", err).Error("containerd: write event to journal")
}
}
}
}
s.eventLock.Lock()
s.eventLog = append(s.eventLog, e)
s.eventLock.Unlock()
count++
if err := enc.Encode(e); err != nil {
logrus.WithField("error", err).Error("containerd: write event to journal")
}
}
}()
return nil
}
func readEventLog(s *Supervisor) error {
f, err := os.Open(filepath.Join(s.config.StateDir, "events.log"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer f.Close()
dec := json.NewDecoder(f)
for {
var e Event
if err := dec.Decode(&e); err != nil {
if err == io.EOF {
break
}
return err
}
s.eventLog = append(s.eventLog, e)
}
return nil
}


@ -1,103 +0,0 @@
package supervisor
import (
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/containerd/runtime"
)
// Worker interface
type Worker interface {
Start()
}
type startTask struct {
Container runtime.Container
CheckpointPath string
Stdin string
Stdout string
Stderr string
Err chan error
StartResponse chan StartResponse
}
// NewWorker returns a new initialized worker
func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker {
return &worker{
s: s,
wg: wg,
}
}
type worker struct {
wg *sync.WaitGroup
s *Supervisor
}
// Start runs a loop in charge of starting new containers
func (w *worker) Start() {
defer w.wg.Done()
for t := range w.s.startTasks {
process, err := t.Container.Start(t.CheckpointPath, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr))
if err != nil {
logrus.WithFields(logrus.Fields{
"error": err,
"id": t.Container.ID(),
}).Error("containerd: start container")
t.Err <- err
evt := &DeleteTask{
ID: t.Container.ID(),
NoEvent: true,
Process: process,
}
w.s.SendTask(evt)
continue
}
oom, err := t.Container.OOM()
if err != nil {
logrus.WithField("error", err).Error("containerd: get oom FD")
}
if err := w.s.monitor.Add(oom); err != nil && err != runtime.ErrContainerExited {
if process.State() != runtime.Stopped {
logrus.WithField("error", err).Error("containerd: notify OOM events")
}
}
if err := w.s.monitor.Add(process); err != nil {
logrus.WithField("error", err).Error("containerd: add process to monitor")
t.Err <- err
evt := &DeleteTask{
ID: t.Container.ID(),
NoEvent: true,
Process: process,
}
w.s.SendTask(evt)
continue
}
// only call process start if we aren't restoring from a checkpoint
// if we have restored from a checkpoint then the process is already started
if t.CheckpointPath == "" {
if err := process.Start(); err != nil {
logrus.WithField("error", err).Error("containerd: start init process")
t.Err <- err
evt := &DeleteTask{
ID: t.Container.ID(),
NoEvent: true,
Process: process,
}
w.s.SendTask(evt)
continue
}
}
t.Err <- nil
t.StartResponse <- StartResponse{
Container: t.Container,
}
w.s.notifySubscribers(Event{
Timestamp: time.Now(),
ID: t.Container.ID(),
Type: StateStart,
})
}
}
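
Wiring workers to a supervisor is left to the daemon. A minimal sketch; the pool size of 10 mirrors the startTasks buffer in New, an assumption rather than a requirement:

package main

import (
	"sync"

	"github.com/docker/containerd/supervisor"
)

// startWorkers launches a pool draining startTasks; the returned
// WaitGroup completes once Stop closes the channel and each loop exits.
func startWorkers(sv *supervisor.Supervisor) *sync.WaitGroup {
	wg := &sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		w := supervisor.NewWorker(sv, wg)
		go w.Start()
	}
	return wg
}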


@ -1,70 +0,0 @@
package testutils
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// GetTestOutDir returns the output directory for testing and benchmark artifacts
func GetTestOutDir() string {
out, _ := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput()
repoRoot := string(out)
prefix := filepath.Join(strings.TrimSpace(repoRoot), "output")
return prefix
}
var (
// ArchivesDir holds the location of the available rootfs
ArchivesDir = filepath.Join("test-artifacts", "archives")
// BundlesRoot holds the location where OCI Bundles are stored
BundlesRoot = filepath.Join("test-artifacts", "oci-bundles")
// OutputDirFormat holds the standard format used when creating a
// new test output directory
OutputDirFormat = filepath.Join("test-artifacts", "runs", "%s")
// RefOciSpecsPath holds the path to the generic OCI config
RefOciSpecsPath = filepath.Join(BundlesRoot, "config.json")
// StateDir holds the path to the directory used by the containerd
// started by tests
StateDir = "/run/containerd-bench-test"
)
// untarRootfs untars the given `source` tarPath into `destination/rootfs`
func untarRootfs(source string, destination string) error {
rootfs := filepath.Join(destination, "rootfs")
if err := os.MkdirAll(rootfs, 0755); err != nil {
fmt.Println("untarRootfs os.MkdirAll failed with err %v", err)
return nil
}
tar := exec.Command("tar", "-C", rootfs, "-xf", source)
return tar.Run()
}
// GenerateReferenceSpecs generates a default OCI spec via `runc spec`
func GenerateReferenceSpecs(destination string) error {
if _, err := os.Stat(filepath.Join(destination, "config.json")); err == nil {
return nil
}
specs := exec.Command("runc", "spec")
specs.Dir = destination
return specs.Run()
}
// CreateBundle generates a valid OCI bundle from the given rootfs
func CreateBundle(source, name string) error {
bundlePath := filepath.Join(BundlesRoot, name)
if err := untarRootfs(filepath.Join(ArchivesDir, source+".tar"), bundlePath); err != nil {
return fmt.Errorf("Failed to untar %s.tar: %v", source, err)
}
return nil
}
// CreateBusyboxBundle generates a bundle based on the busybox rootfs
func CreateBusyboxBundle(name string) error {
return CreateBundle("busybox", name)
}
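
For illustration, a hedged end-to-end use of these helpers; it assumes a busybox.tar rootfs under ArchivesDir, runc on PATH, and that the package is importable as github.com/docker/containerd/testutils:

package main

import (
	"log"
	"path/filepath"

	"github.com/docker/containerd/testutils"
)

func main() {
	if err := testutils.CreateBusyboxBundle("busybox-test"); err != nil {
		log.Fatal(err)
	}
	bundle := filepath.Join(testutils.BundlesRoot, "busybox-test")
	// writes config.json via `runc spec` unless one already exists
	if err := testutils.GenerateReferenceSpecs(bundle); err != nil {
		log.Fatal(err)
	}
	log.Println("OCI bundle ready at", bundle)
}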