Add golint to test (#255)
* Add a new lint rule to the Makefile
  Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
* Fix linter errors
  Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
* Allow replacing the default apt mirror
  Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
parent 4176ba7b52
commit 5624732128

38 changed files with 297 additions and 151 deletions
Dockerfile (18 lines changed)

@@ -1,5 +1,9 @@
 FROM debian:jessie
 
+# allow replacing httpredir mirror
+ARG APT_MIRROR=httpredir.debian.org
+RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list
+
 RUN apt-get update && apt-get install -y \
 	build-essential \
 	ca-certificates \
@@ -18,6 +22,20 @@ RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd6
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
 ENV GOPATH /go:/go/src/github.com/docker/containerd/vendor
 
+ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
+# Grab Go's cover tool for dead-simple code coverage testing
+# Grab Go's vet tool for examining go code to find suspicious constructs
+# and help prevent errors that the compiler might not catch
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \
+	&& go install -v golang.org/x/tools/cmd/cover \
+	&& go install -v golang.org/x/tools/cmd/vet
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
 WORKDIR /go/src/github.com/docker/containerd
 
 # install seccomp: the version shipped in trusty is too old
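The new ARG makes the Debian package mirror overridable at image build time without editing the Dockerfile. A minimal usage sketch (the image tag and mirror hostname are illustrative, not part of this commit):

    docker build --build-arg APT_MIRROR=ftp.us.debian.org -t containerd-dev .

When the flag is omitted, the declared default httpredir.debian.org is used and the sed call leaves sources.list unchanged.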
Makefile (4 lines changed)

@@ -80,7 +80,7 @@ fmt:
 	@gofmt -s -l . | grep -v vendor | grep -v .pb. | tee /dev/stderr
 
 lint:
-	@golint ./... | grep -v vendor | grep -v .pb. | tee /dev/stderr
+	@hack/validate-lint
 
 shell: dbuild
 	$(DOCKER_RUN) bash
@@ -95,7 +95,7 @@ endif
 bench: shim validate install bundles-rootfs
 	go test -bench=. -v $(shell go list ./... | grep -v /vendor | grep -v /integration-test)
 
-validate: fmt
+validate: fmt lint
 
 uninstall:
 	$(foreach file,containerd containerd-shim ctr,rm /usr/local/bin/$(file);)
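With this change, lint failures fail make validate as well. A sketch of the intended invocations (assuming the usual containerd development environment with golint installed, as set up in the Dockerfile above):

    make lint       # runs hack/validate-lint, i.e. golint over the tree minus vendor/ and generated .pb. files
    make validate   # now runs fmt and lint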
@@ -82,7 +82,7 @@ func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpo
 	e.Checkpoint = &runtime.Checkpoint{
 		Name: r.Checkpoint.Name,
 		Exit: r.Checkpoint.Exit,
-		Tcp: r.Checkpoint.Tcp,
+		TCP: r.Checkpoint.Tcp,
 		UnixSockets: r.Checkpoint.UnixSockets,
 		Shell: r.Checkpoint.Shell,
 	}
@@ -135,7 +135,7 @@ func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointR
 	for _, c := range checkpoints {
 		out = append(out, &types.Checkpoint{
 			Name: c.Name,
-			Tcp: c.Tcp,
+			Tcp: c.TCP,
 			Shell: c.Shell,
 			UnixSockets: c.UnixSockets,
 			// TODO: figure out timestamp
@@ -333,15 +333,15 @@ func convertToPb(st *runtime.Stat) *types.StatsResponse {
 	systemUsage, _ := getSystemCPUUsage()
 	pbSt.CgroupStats.CpuStats = &types.CpuStats{
 		CpuUsage: &types.CpuUsage{
-			TotalUsage: st.Cpu.Usage.Total,
-			PercpuUsage: st.Cpu.Usage.Percpu,
-			UsageInKernelmode: st.Cpu.Usage.Kernel,
-			UsageInUsermode: st.Cpu.Usage.User,
+			TotalUsage: st.CPU.Usage.Total,
+			PercpuUsage: st.CPU.Usage.Percpu,
+			UsageInKernelmode: st.CPU.Usage.Kernel,
+			UsageInUsermode: st.CPU.Usage.User,
 		},
 		ThrottlingData: &types.ThrottlingData{
-			Periods: st.Cpu.Throttling.Periods,
-			ThrottledPeriods: st.Cpu.Throttling.ThrottledPeriods,
-			ThrottledTime: st.Cpu.Throttling.ThrottledTime,
+			Periods: st.CPU.Throttling.Periods,
+			ThrottledPeriods: st.CPU.Throttling.ThrottledPeriods,
+			ThrottledTime: st.CPU.Throttling.ThrottledTime,
 		},
 		SystemUsage: systemUsage,
 	}
@@ -1,6 +1,7 @@
 package pprof
 
 import (
+	// expvar init routine adds the "/debug/vars" handler
 	_ "expvar"
 	"net/http"
 	"net/http/pprof"
@@ -8,6 +9,7 @@ import (
 	"github.com/Sirupsen/logrus"
 )
 
+// Enable registers the "/debug/pprof" handler
 func Enable(address string) {
 	http.Handle("/", http.RedirectHandler("/debug/pprof", http.StatusMovedPermanently))
 
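Enable wires the expvar and pprof handlers into the default HTTP mux. Assuming the daemon exposes that mux on a debug address, the data can be inspected with plain HTTP requests; a hypothetical example (the address is illustrative only):

    curl http://127.0.0.1:5555/debug/pprof/
    curl http://127.0.0.1:5555/debug/vars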
@@ -6,14 +6,17 @@ import (
 	"syscall"
 )
 
+// EpollCreate1 directly calls syscall.EpollCreate1
 func EpollCreate1(flag int) (int, error) {
 	return syscall.EpollCreate1(flag)
 }
 
+// EpollCtl directly calls syscall.EpollCtl
 func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
 	return syscall.EpollCtl(epfd, op, fd, event)
 }
 
+// EpollWait directly calls syscall.EpollWait
 func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
 	return syscall.EpollWait(epfd, events, msec)
 }
@@ -40,6 +40,7 @@ import (
 	"unsafe"
 )
 
+// EpollCreate1 calls a C implementation
 func EpollCreate1(flag int) (int, error) {
 	fd := int(C.EpollCreate1(C.int(flag)))
 	if fd < 0 {
@@ -48,6 +49,7 @@ func EpollCreate1(flag int) (int, error) {
 	return fd, nil
 }
 
+// EpollCtl calls a C implementation
 func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
 	errno := C.EpollCtl(C.int(epfd), C.int(syscall.EPOLL_CTL_ADD), C.int(fd), C.int(event.Events), C.int(event.Fd))
 	if errno < 0 {
@@ -56,6 +58,7 @@ func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
 	return nil
 }
 
+// EpollWait calls a C implementation
 func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
 	var c_events [128]C.struct_event_t
 	n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events))))
@@ -24,8 +24,8 @@ type checkpoint struct {
 	Created time.Time `json:"created"`
 	// Name is the name of the checkpoint
 	Name string `json:"name"`
-	// Tcp checkpoints open tcp connections
-	Tcp bool `json:"tcp"`
+	// TCP checkpoints open tcp connections
+	TCP bool `json:"tcp"`
 	// UnixSockets persists unix sockets in the checkpoint
 	UnixSockets bool `json:"unixSockets"`
 	// Shell persists tty sessions in the checkpoint
@@ -140,7 +140,7 @@ func (p *process) start() error {
 	if p.checkpoint.Shell {
 		add("--shell-job")
 	}
-	if p.checkpoint.Tcp {
+	if p.checkpoint.TCP {
 		add("--tcp-established")
 	}
 	if p.checkpoint.UnixSockets {
@@ -292,6 +292,7 @@ func (p *process) openIO() error {
 	return nil
 }
 
+// IO holds all 3 standard io Reader/Writer (stdin,stdout,stderr)
 type IO struct {
 	Stdin io.WriteCloser
 	Stdout io.ReadCloser
@@ -15,7 +15,7 @@ import (
 
 const usage = `High performance container daemon cli`
 
-type Exit struct {
+type exit struct {
 	Code int
 }
 
@@ -23,7 +23,7 @@ func main() {
 	// We want our defer functions to be run when calling fatal()
 	defer func() {
 		if e := recover(); e != nil {
-			if ex, ok := e.(Exit); ok == true {
+			if ex, ok := e.(exit); ok == true {
 				os.Exit(ex.Code)
 			}
 			panic(e)
@@ -86,5 +86,5 @@ var versionCommand = cli.Command{
 
 func fatal(err string, code int) {
 	fmt.Fprintf(os.Stderr, "[ctr] %s\n", err)
-	panic(Exit{code})
+	panic(exit{code})
 }
hack/validate-lint (new executable file, 9 lines added)

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+lint_error=$(golint ./... | grep -v vendor | grep -v .pb. | tee /dev/stderr)
+
+if [ "$lint_error" != "" ]; then
+	exit 1
+fi
+
+exit 0
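hack/validate-lint exits non-zero as soon as golint prints anything for non-vendored, non-generated code, which is what lets make validate fail the build. A quick, illustrative way to check the behaviour locally:

    ./hack/validate-lint; echo "exit status: $?"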
@@ -44,14 +44,14 @@ func untarRootfs(source string, destination string) error {
 func CreateBundleWithFilter(source, name string, args []string, filter func(spec *ocs.Spec)) error {
 	// Generate the spec
 	var spec ocs.Spec
-	if f, err := os.Open(utils.RefOciSpecsPath); err != nil {
+	f, err := os.Open(utils.RefOciSpecsPath)
+	if err != nil {
 		return fmt.Errorf("Failed to open default spec: %v", err)
-	} else {
-		if err := json.NewDecoder(f).Decode(&spec); err != nil {
-			return fmt.Errorf("Failed to load default spec: %v", err)
-		}
-		f.Close()
 	}
+	if err := json.NewDecoder(f).Decode(&spec); err != nil {
+		return fmt.Errorf("Failed to load default spec: %v", err)
+	}
+	f.Close()
 
 	spec.Process.Args = args
 	spec.Process.Terminal = false
@@ -21,10 +21,10 @@ func (cs *ContainerdSuite) ListRunningContainers() ([]*types.Container, error) {
 	return resp.Containers, nil
 }
 
-func (cs *ContainerdSuite) SignalContainerProcess(id string, procId string, sig uint32) error {
+func (cs *ContainerdSuite) SignalContainerProcess(id string, procID string, sig uint32) error {
 	_, err := cs.grpcClient.Signal(context.Background(), &types.SignalRequest{
 		Id: id,
-		Pid: procId,
+		Pid: procID,
 		Signal: sig,
 	})
 	return err
@@ -74,8 +74,8 @@ type stdio struct {
 	stderrBuffer bytes.Buffer
 }
 
-type containerProcess struct {
-	containerId string
+type ContainerProcess struct {
+	containerID string
 	pid string
 	bundle *Bundle
 	io stdio
@@ -84,7 +84,7 @@ type containerProcess struct {
 	hasExited bool
 }
 
-func (c *containerProcess) openIo() (err error) {
+func (c *ContainerProcess) openIo() (err error) {
 	defer func() {
 		if err != nil {
 			c.Cleanup()
@@ -111,11 +111,11 @@ func (c *containerProcess) openIo() (err error) {
 	return nil
 }
 
-func (c *containerProcess) GetEventsChannel() chan *types.Event {
+func (c *ContainerProcess) GetEventsChannel() chan *types.Event {
 	return c.eventsCh
 }
 
-func (c *containerProcess) GetNextEvent() *types.Event {
+func (c *ContainerProcess) GetNextEvent() *types.Event {
 	if c.hasExited {
 		return nil
 	}
@@ -131,16 +131,16 @@ func (c *containerProcess) GetNextEvent() *types.Event {
 	return e
 }
 
-func (c *containerProcess) CloseStdin() error {
+func (c *ContainerProcess) CloseStdin() error {
 	_, err := c.cs.grpcClient.UpdateProcess(context.Background(), &types.UpdateProcessRequest{
-		Id: c.containerId,
+		Id: c.containerID,
 		Pid: c.pid,
 		CloseStdin: true,
 	})
 	return err
 }
 
-func (c *containerProcess) Cleanup() {
+func (c *ContainerProcess) Cleanup() {
 	for _, f := range []*os.File{
 		c.io.stdinf,
 		c.io.stdoutf,
@@ -153,9 +153,9 @@ func (c *containerProcess) Cleanup() {
 	}
 }
 
-func NewContainerProcess(cs *ContainerdSuite, bundle *Bundle, cid, pid string) (c *containerProcess, err error) {
-	c = &containerProcess{
-		containerId: cid,
+func NewContainerProcess(cs *ContainerdSuite, bundle *Bundle, cid, pid string) (c *ContainerProcess, err error) {
+	c = &ContainerProcess{
+		containerID: cid,
 		pid: "init",
 		bundle: bundle,
 		eventsCh: make(chan *types.Event, 8),
@@ -181,7 +181,7 @@ func NewContainerProcess(cs *ContainerdSuite, bundle *Bundle, cid, pid string) (
 	return c, nil
 }
 
-func (cs *ContainerdSuite) StartContainerWithEventFilter(id, bundleName string, filter func(*types.Event)) (c *containerProcess, err error) {
+func (cs *ContainerdSuite) StartContainerWithEventFilter(id, bundleName string, filter func(*types.Event)) (c *ContainerProcess, err error) {
 	bundle := GetBundle(bundleName)
 	if bundle == nil {
 		return nil, fmt.Errorf("No such bundle '%s'", bundleName)
@@ -216,11 +216,11 @@ func (cs *ContainerdSuite) StartContainerWithEventFilter(id, bundleName string,
 	return c, nil
 }
 
-func (cs *ContainerdSuite) StartContainer(id, bundleName string) (c *containerProcess, err error) {
+func (cs *ContainerdSuite) StartContainer(id, bundleName string) (c *ContainerProcess, err error) {
 	return cs.StartContainerWithEventFilter(id, bundleName, nil)
 }
 
-func (cs *ContainerdSuite) RunContainer(id, bundleName string) (c *containerProcess, err error) {
+func (cs *ContainerdSuite) RunContainer(id, bundleName string) (c *ContainerProcess, err error) {
 	c, err = cs.StartContainer(id, bundleName)
 	if err != nil {
 		return nil, err
@@ -236,14 +236,14 @@ func (cs *ContainerdSuite) RunContainer(id, bundleName string) (c *containerProc
 	return c, err
 }
 
-func (cs *ContainerdSuite) AddProcessToContainer(init *containerProcess, pid, cwd string, env, args []string, uid, gid uint32) (c *containerProcess, err error) {
-	c, err = NewContainerProcess(cs, init.bundle, init.containerId, pid)
+func (cs *ContainerdSuite) AddProcessToContainer(init *ContainerProcess, pid, cwd string, env, args []string, uid, gid uint32) (c *ContainerProcess, err error) {
+	c, err = NewContainerProcess(cs, init.bundle, init.containerID, pid)
 	if err != nil {
 		return nil, err
 	}
 
 	pr := &types.AddProcessRequest{
-		Id: init.containerId,
+		Id: init.containerID,
 		Pid: pid,
 		Args: args,
 		Cwd: cwd,
@@ -17,12 +17,12 @@ func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) {
 
 	var (
 		err error
-		initp *containerProcess
-		echop *containerProcess
+		initp *ContainerProcess
+		echop *ContainerProcess
 	)
 
-	containerId := "top"
-	initp, err = cs.StartContainer(containerId, bundleName)
+	containerID := "top"
+	initp, err = cs.StartContainer(containerID, bundleName)
 	t.Assert(err, checker.Equals, nil)
 
 	echop, err = cs.AddProcessToContainer(initp, "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Ay Caramba! ; exit 1"}, 0, 0)
@@ -31,19 +31,19 @@ func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) {
 	for _, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "start-process",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "echo",
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 1,
 			Pid: "echo",
 		},
@@ -66,35 +66,35 @@ func (cs *ContainerdSuite) TestBusyboxTopExecTop(t *check.C) {
 
 	var (
 		err error
-		initp *containerProcess
+		initp *ContainerProcess
 	)
 
-	containerId := "top"
-	initp, err = cs.StartContainer(containerId, bundleName)
+	containerID := "top"
+	initp, err = cs.StartContainer(containerID, bundleName)
 	t.Assert(err, checker.Equals, nil)
 
-	execId := "top1"
-	_, err = cs.AddProcessToContainer(initp, execId, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
+	execID := "top1"
+	_, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
 	t.Assert(err, checker.Equals, nil)
 
 	for idx, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "start-process",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
-			Pid: execId,
+			Pid: execID,
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 137,
-			Pid: execId,
+			Pid: execID,
 		},
 	} {
 		ch := initp.GetEventsChannel()
@@ -103,7 +103,7 @@ func (cs *ContainerdSuite) TestBusyboxTopExecTop(t *check.C) {
 		t.Assert(*e, checker.Equals, evt)
 		if idx == 1 {
 			// Process Started, kill it
-			cs.SignalContainerProcess(containerId, "top1", uint32(syscall.SIGKILL))
+			cs.SignalContainerProcess(containerID, "top1", uint32(syscall.SIGKILL))
 		}
 	}
@@ -126,39 +126,39 @@ func (cs *ContainerdSuite) TestBusyboxTopExecTopKillInit(t *check.C) {
 
 	var (
 		err error
-		initp *containerProcess
+		initp *ContainerProcess
 	)
 
-	containerId := "top"
-	initp, err = cs.StartContainer(containerId, bundleName)
+	containerID := "top"
+	initp, err = cs.StartContainer(containerID, bundleName)
 	t.Assert(err, checker.Equals, nil)
 
-	execId := "top1"
-	_, err = cs.AddProcessToContainer(initp, execId, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
+	execID := "top1"
+	_, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0)
 	t.Assert(err, checker.Equals, nil)
 
 	for idx, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "start-process",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
-			Pid: execId,
+			Pid: execID,
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 137,
-			Pid: execId,
+			Pid: execID,
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 143,
 			Pid: "init",
 		},
@@ -169,7 +169,7 @@ func (cs *ContainerdSuite) TestBusyboxTopExecTopKillInit(t *check.C) {
 		t.Assert(*e, checker.Equals, evt)
 		if idx == 1 {
 			// Process Started, kill it
-			cs.SignalContainerProcess(containerId, "init", uint32(syscall.SIGTERM))
+			cs.SignalContainerProcess(containerID, "init", uint32(syscall.SIGTERM))
 		}
 	}
 }
@@ -79,8 +79,8 @@ func (cs *ContainerdSuite) TestStartBusyboxLsEvents(t *check.C) {
 		t.Fatal(err)
 	}
 
-	containerId := "ls-events"
-	c, err := cs.StartContainer(containerId, "busybox-ls")
+	containerID := "ls-events"
+	c, err := cs.StartContainer(containerID, "busybox-ls")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -88,13 +88,13 @@ func (cs *ContainerdSuite) TestStartBusyboxLsEvents(t *check.C) {
 	for _, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "init",
 		},
@@ -144,7 +144,7 @@ func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) {
 		t.Fatal(err)
 	}
 
-	containerId := "top"
+	containerID := "top"
 	c, err := cs.StartContainer("top", bundleName)
 	if err != nil {
 		t.Fatal(err)
@@ -152,7 +152,7 @@ func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) {
 
 	<-time.After(1 * time.Second)
 
-	err = cs.KillContainer(containerId)
+	err = cs.KillContainer(containerID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -160,13 +160,13 @@ func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) {
 	for _, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 128 + uint32(syscall.SIGKILL),
 			Pid: "init",
 		},
@@ -189,7 +189,7 @@ func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) {
 		t.Fatal(err)
 	}
 
-	containerId := "top"
+	containerID := "top"
 	c, err := cs.StartContainer("top", bundleName)
 	if err != nil {
 		t.Fatal(err)
@@ -197,7 +197,7 @@ func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) {
 
 	<-time.After(1 * time.Second)
 
-	err = cs.SignalContainer(containerId, uint32(syscall.SIGTERM))
+	err = cs.SignalContainer(containerID, uint32(syscall.SIGTERM))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -205,13 +205,13 @@ func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) {
 	for _, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "exit",
-			Id: containerId,
+			Id: containerID,
 			Status: 128 + uint32(syscall.SIGTERM),
 			Pid: "init",
 		},
@@ -233,13 +233,13 @@ func (cs *ContainerdSuite) TestStartBusyboxTrapUSR1(t *check.C) {
 		t.Fatal(err)
 	}
 
-	containerId := "trap-usr1"
-	c, err := cs.StartContainer(containerId, "busybox-trap-usr1")
+	containerID := "trap-usr1"
+	c, err := cs.StartContainer(containerID, "busybox-trap-usr1")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := cs.SignalContainer(containerId, uint32(syscall.SIGUSR1)); err != nil {
+	if err := cs.SignalContainer(containerID, uint32(syscall.SIGUSR1)); err != nil {
 		t.Fatal(err)
 	}
 
@@ -259,36 +259,36 @@ func (cs *ContainerdSuite) TestStartBusyboxTopPauseResume(t *check.C) {
 		t.Fatal(err)
 	}
 
-	containerId := "top"
-	c, err := cs.StartContainer(containerId, bundleName)
+	containerID := "top"
+	c, err := cs.StartContainer(containerID, bundleName)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := cs.PauseContainer(containerId); err != nil {
+	if err := cs.PauseContainer(containerID); err != nil {
 		t.Fatal(err)
 	}
 
-	if err := cs.ResumeContainer(containerId); err != nil {
+	if err := cs.ResumeContainer(containerID); err != nil {
 		t.Fatal(err)
 	}
 
 	for _, evt := range []types.Event{
 		{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "pause",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
 		{
 			Type: "resume",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 		},
@@ -323,8 +323,8 @@ func (cs *ContainerdSuite) TestRestart(t *check.C) {
 	totalCtr := 10
 
 	for i := 0; i < totalCtr; i++ {
-		containerId := fmt.Sprintf("top%d", i)
-		c, err := cs.StartContainer(containerId, bundleName)
+		containerID := fmt.Sprintf("top%d", i)
+		c, err := cs.StartContainer(containerID, bundleName)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -333,7 +333,7 @@ func (cs *ContainerdSuite) TestRestart(t *check.C) {
 
 		t.Assert(*e, checker.Equals, types.Event{
 			Type: "start-container",
-			Id: containerId,
+			Id: containerID,
 			Status: 0,
 			Pid: "",
 			Timestamp: e.Timestamp,
@@ -369,18 +369,18 @@ func (cs *ContainerdSuite) TestRestart(t *check.C) {
 	deathChans := make([]chan error, len(killedCtr))
 	deathChansIdx := 0
 
-	for i, _ := range killedCtr {
+	for i := range killedCtr {
 		ch := make(chan error, 1)
 		deathChans[deathChansIdx] = ch
 		deathChansIdx++
 		syscall.Kill(int(containers[i].Pids[0]), syscall.SIGKILL)
 
 		// Filter to be notified of their death
-		containerId := fmt.Sprintf("top%d", i)
+		containerID := fmt.Sprintf("top%d", i)
 		f = func(event *types.Event) {
 			expectedEvent := types.Event{
 				Type: "exit",
-				Id: containerId,
+				Id: containerID,
 				Status: 137,
 				Pid: "init",
 			}
@@ -391,13 +391,13 @@ func (cs *ContainerdSuite) TestRestart(t *check.C) {
 				ch <- nil
 			}
 		}
-		cs.SetContainerEventFilter(containerId, f)
+		cs.SetContainerEventFilter(containerID, f)
 	}
 
 	cs.RestartDaemon(true)
 
 	// Ensure we got our events
-	for i, _ := range deathChans {
+	for i := range deathChans {
 		done := false
 		for done == false {
 			select {
@@ -1,6 +1,8 @@
 // +build linux
 
-// http://man7.org/linux/man-pages/man2/prctl.2.html
+// Package osutils provide access to the Get Child and Set Child prctl
+// flags.
+// See http://man7.org/linux/man-pages/man2/prctl.2.html
 package osutils
 
 import (
@@ -8,6 +10,7 @@ import (
 	"unsafe"
 )
 
+// PR_SET_CHILD_SUBREAPER allows setting the child subreaper.
 // If arg2 is nonzero, set the "child subreaper" attribute of the
 // calling process; if arg2 is zero, unset the attribute. When a
 // process is marked as a child subreaper, all of the children
@@ -19,16 +22,18 @@ import (
 // nearest still living ancestor subreaper will receive a SIGCHLD
 // signal and be able to wait(2) on the process to discover its
 // termination status.
-const PR_SET_CHILD_SUBREAPER = 36
+const prSetChildSubreaper = 36
 
+// PR_GET_CHILD_SUBREAPER allows retrieving the current child
+// subreaper.
 // Return the "child subreaper" setting of the caller, in the
 // location pointed to by (int *) arg2.
-const PR_GET_CHILD_SUBREAPER = 37
+const prGetChildSubreaper = 37
 
 // GetSubreaper returns the subreaper setting for the calling process
 func GetSubreaper() (int, error) {
 	var i uintptr
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0); err != 0 {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 {
 		return -1, err
 	}
 	return int(i), nil
@@ -36,7 +41,7 @@ func GetSubreaper() (int, error) {
 
 // SetSubreaper sets the value i as the subreaper setting for the calling process
 func SetSubreaper(i int) error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_SET_CHILD_SUBREAPER, uintptr(i), 0); err != 0 {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 {
 		return err
 	}
 	return nil
@@ -7,6 +7,7 @@ import (
 )
 
 //Solaris TODO
 
+// GetSubreaper returns the subreaper setting for the calling process
 func GetSubreaper() (int, error) {
 	return 0, errors.New("osutils GetSubreaper not implemented on Solaris")
@@ -16,6 +16,7 @@ import (
 	ocs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
+// Container defines the operations allowed on a container
 type Container interface {
 	// ID returns the container ID
 	ID() string
@@ -60,6 +61,7 @@ type Container interface {
 	Status() (State, error)
 }
 
+// OOM wraps a container OOM.
 type OOM interface {
 	io.Closer
 	FD() int
@@ -68,12 +70,15 @@ type OOM interface {
 	Removed() bool
 }
 
+// Stdio holds the path to the 3 pipes used for the standard ios.
 type Stdio struct {
 	Stdin string
 	Stdout string
 	Stderr string
 }
 
+// NewStdio wraps the given standard io path into an Stdio struct.
+// If a given parameter is the empty string, it is replaced by "/dev/null"
 func NewStdio(stdin, stdout, stderr string) Stdio {
 	for _, s := range []*string{
 		&stdin, &stdout, &stderr,
@@ -89,6 +94,7 @@ func NewStdio(stdin, stdout, stderr string) Stdio {
 	}
 }
 
+// ContainerOpts keeps the options passed at container creation
 type ContainerOpts struct {
 	Root string
 	ID string
@@ -136,6 +142,7 @@ func New(opts ContainerOpts) (Container, error) {
 	return c, nil
 }
 
+// Load return a new container from the matchin state file on disk.
 func Load(root, id string, timeout time.Duration) (Container, error) {
 	var s state
 	f, err := os.Open(filepath.Join(root, id, StateFile))
@@ -355,7 +362,7 @@ func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error {
 	if cpt.Shell {
 		add("--shell-job")
 	}
-	if cpt.Tcp {
+	if cpt.TCP {
 		add("--tcp-established")
 	}
 	if cpt.UnixSockets {
@@ -15,6 +15,7 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// Process holds the operation allowed on a container's process
 type Process interface {
 	io.Closer
 
@@ -8,15 +8,17 @@ import (
 )
 
 var (
 	ErrNotChildProcess = errors.New("containerd: not a child process for container")
 	ErrInvalidContainerType = errors.New("containerd: invalid container type for runtime")
 	ErrCheckpointNotExists = errors.New("containerd: checkpoint does not exist for container")
 	ErrCheckpointExists = errors.New("containerd: checkpoint already exists")
-	ErrContainerExited = errors.New("containerd: container has exited")
 	ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime")
-	ErrProcessNotExited = errors.New("containerd: process has not exited")
 	ErrProcessExited = errors.New("containerd: process has exited")
-	ErrContainerNotStarted = errors.New("containerd: container not started")
+	// ErrContainerExited is returned when access to an exited
+	// container is attempted
+	ErrContainerExited = errors.New("containerd: container has exited")
+	// ErrProcessNotExited is returned when trying to retrive the exit
+	// status of an alive process
+	ErrProcessNotExited = errors.New("containerd: process has not exited")
+	// ErrContainerNotStarted is returned when a container fails to
+	// start without error from the shim or the OCI runtime
+	ErrContainerNotStarted = errors.New("containerd: container not started")
+	// ErrContainerStartTimeout is returned if a container takes too
+	// long to start
 	ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
 
 	errNoPidFile = errors.New("containerd: no process pid file found")
@@ -25,20 +27,30 @@
 )
 
 const (
-	ExitFile = "exit"
+	// ExitFile holds the name of the pipe used to monitor process
+	// exit
+	ExitFile = "exit"
+	// ExitStatusFile holds the name of the file where the container
+	// exit code is to be written
 	ExitStatusFile = "exitStatus"
-	StateFile = "state.json"
-	ControlFile = "control"
-	InitProcessID = "init"
+	// StateFile holds the name of the file where the container state
+	// is written
+	StateFile = "state.json"
+	// ControlFile holds the name of the pipe used to control the shim
+	ControlFile = "control"
+	// InitProcessID holds the special ID used for the very first
+	// container's process
+	InitProcessID = "init"
 )
 
+// Checkpoint holds information regarding a container checkpoint
 type Checkpoint struct {
 	// Timestamp is the time that checkpoint happened
 	Created time.Time `json:"created"`
 	// Name is the name of the checkpoint
 	Name string `json:"name"`
-	// Tcp checkpoints open tcp connections
-	Tcp bool `json:"tcp"`
+	// TCP checkpoints open tcp connections
+	TCP bool `json:"tcp"`
 	// UnixSockets persists unix sockets in the checkpoint
 	UnixSockets bool `json:"unixSockets"`
 	// Shell persists tty sessions in the checkpoint
@@ -53,8 +65,11 @@ type PlatformProcessState struct {
 	RootUID int `json:"rootUID"`
 	RootGID int `json:"rootGID"`
 }
 
+// State represents a container state
 type State string
 
+// Resource regroups the various container limits that can be updated
 type Resource struct {
 	CPUShares int64
 	BlkioWeight uint16
@@ -68,6 +83,7 @@ type Resource struct {
 	MemorySwap int64
 }
 
+// Possible container states
 const (
 	Paused = State("paused")
 	Stopped = State("stopped")
@@ -86,6 +102,8 @@ type state struct {
 	NoPivotRoot bool `json:"noPivotRoot"`
 }
 
+// ProcessState holds the process OCI specs along with various fields
+// required by containerd
 type ProcessState struct {
 	specs.ProcessSpec
 	Exec bool `json:"exec"`
@@ -2,22 +2,25 @@ package runtime
 
 import "time"
 
+// Stat holds a container statistics
 type Stat struct {
 	// Timestamp is the time that the statistics where collected
 	Timestamp time.Time
-	Cpu Cpu `json:"cpu"`
+	CPU CPU `json:"cpu"`
 	Memory Memory `json:"memory"`
 	Pids Pids `json:"pids"`
 	Blkio Blkio `json:"blkio"`
 	Hugetlb map[string]Hugetlb `json:"hugetlb"`
 }
 
+// Hugetlb holds information regarding a container huge tlb usage
 type Hugetlb struct {
 	Usage uint64 `json:"usage,omitempty"`
 	Max uint64 `json:"max,omitempty"`
 	Failcnt uint64 `json:"failcnt"`
 }
 
+// BlkioEntry represents a single record for a Blkio stat
 type BlkioEntry struct {
 	Major uint64 `json:"major,omitempty"`
 	Minor uint64 `json:"minor,omitempty"`
@@ -25,6 +28,7 @@ type BlkioEntry struct {
 	Value uint64 `json:"value,omitempty"`
 }
 
+// Blkio regroups all the Blkio related stats
 type Blkio struct {
 	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
 	IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"`
@@ -36,18 +40,21 @@ type Blkio struct {
 	SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"`
 }
 
+// Pids holds the stat of the pid usage of the machine
type Pids struct {
 	Current uint64 `json:"current,omitempty"`
 	Limit uint64 `json:"limit,omitempty"`
 }
 
+// Throttling holds a cpu throttling information
 type Throttling struct {
 	Periods uint64 `json:"periods,omitempty"`
 	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
 	ThrottledTime uint64 `json:"throttledTime,omitempty"`
 }
 
-type CpuUsage struct {
+// CPUUsage holds information regarding cpu usage
+type CPUUsage struct {
 	// Units: nanoseconds.
 	Total uint64 `json:"total,omitempty"`
 	Percpu []uint64 `json:"percpu,omitempty"`
@@ -55,11 +62,13 @@ type CpuUsage struct {
 	User uint64 `json:"user"`
 }
 
-type Cpu struct {
-	Usage CpuUsage `json:"usage,omitempty"`
+// CPU regroups both a CPU usage and throttling information
+type CPU struct {
+	Usage CPUUsage `json:"usage,omitempty"`
 	Throttling Throttling `json:"throttling,omitempty"`
 }
 
+// MemoryEntry regroups statistic about a given type of memory
 type MemoryEntry struct {
 	Limit uint64 `json:"limit"`
 	Usage uint64 `json:"usage,omitempty"`
@@ -67,6 +76,7 @@ type MemoryEntry struct {
 	Failcnt uint64 `json:"failcnt"`
 }
 
+// Memory holds information regarding the different type of memories available
 type Memory struct {
 	Cache uint64 `json:"cache,omitempty"`
 	Usage MemoryEntry `json:"usage,omitempty"`
@@ -1,9 +1,12 @@
 package specs
 
-import ocs "github.com/opencontainers/runtime-spec/specs-go"
+import oci "github.com/opencontainers/runtime-spec/specs-go"
 
 type (
-	ProcessSpec ocs.Process
-	Spec ocs.Spec
-	Rlimit ocs.Rlimit
+	// ProcessSpec aliases the platform process specs
+	ProcessSpec oci.Process
+	// Spec aliases the platform oci spec
+	Spec oci.Spec
+	// Rlimit aliases the platform resource limit
+	Rlimit oci.Rlimit
 )
@@ -7,6 +7,8 @@ import (
 	"github.com/docker/containerd/specs"
 )
 
+// AddProcessTask holds everything necessary to add a process to a
+// container
 type AddProcessTask struct {
 	baseTask
 	ID string
@@ -4,6 +4,7 @@ package supervisor
 
 import "github.com/docker/containerd/runtime"
 
+// CreateCheckpointTask holds needed parameters to create a new checkpoint
 type CreateCheckpointTask struct {
 	baseTask
 	ID string
@@ -19,6 +20,7 @@ func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error {
 	return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir)
 }
 
+// DeleteCheckpointTask holds needed parameters to delete a checkpoint
 type DeleteCheckpointTask struct {
 	baseTask
 	ID string
@@ -7,6 +7,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// StartTask holds needed parameters to create a new container
 type StartTask struct {
 	baseTask
 	ID string
@@ -7,6 +7,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// DeleteTask holds needed parameters to remove a container
 type DeleteTask struct {
 	baseTask
 	ID string
@@ -3,14 +3,18 @@ package supervisor
 import "errors"
 
 var (
 	// External errors
 	ErrTaskChanNil = errors.New("containerd: task channel is nil")
 	ErrBundleNotFound = errors.New("containerd: bundle not found")
-	ErrContainerNotFound = errors.New("containerd: container not found")
 	ErrContainerExists = errors.New("containerd: container already exists")
-	ErrProcessNotFound = errors.New("containerd: process not found for container")
+	// ErrContainerNotFound is returned when the container ID passed
+	// for a given operation is invalid
+	ErrContainerNotFound = errors.New("containerd: container not found")
+	// ErrProcessNotFound is returned when the process ID passed for
+	// a given operation is invalid
+	ErrProcessNotFound = errors.New("containerd: process not found for container")
+	// ErrUnknownContainerStatus is returned when the container status
+	// cannot be determined
 	ErrUnknownContainerStatus = errors.New("containerd: unknown container status ")
-	ErrUnknownTask = errors.New("containerd: unknown task type")
+	// ErrUnknownTask is returned when an unknown Task type is
+	// scheduled (should never happen).
+	ErrUnknownTask = errors.New("containerd: unknown task type")
 
 	// Internal errors
 	errShutdown = errors.New("containerd: supervisor is shutdown")
@@ -7,6 +7,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// ExitTask holds needed parameters to execute the exit task
 type ExitTask struct {
 	baseTask
 	Process runtime.Process
@@ -56,6 +57,7 @@ func (s *Supervisor) exit(t *ExitTask) error {
 	return nil
 }
 
+// ExecExitTask holds needed parameters to execute the exec exit task
 type ExecExitTask struct {
 	baseTask
 	ID string
@@ -2,6 +2,8 @@ package supervisor
 
 import "github.com/docker/containerd/runtime"
 
+// GetContainersTask holds needed parameters to retrieve a list of
+// containers
 type GetContainersTask struct {
 	baseTask
 	ID string
@@ -4,11 +4,14 @@ package supervisor
 
 import "github.com/cloudfoundry/gosigar"
 
+// Machine holds the current machine cpu count and ram size
 type Machine struct {
 	Cpus int
 	Memory int64
 }
 
+// CollectMachineInformation returns information regarding the current
+// machine (e.g. CPU count, RAM amount)
 func CollectMachineInformation() (Machine, error) {
 	m := Machine{}
 	cpu := sigar.CpuList{}
@@ -3,18 +3,29 @@ package supervisor
 import "github.com/rcrowley/go-metrics"
 
 var (
-	ContainerCreateTimer = metrics.NewTimer()
-	ContainerDeleteTimer = metrics.NewTimer()
-	ContainerStartTimer = metrics.NewTimer()
-	ContainerStatsTimer = metrics.NewTimer()
-	ContainersCounter = metrics.NewCounter()
+	// ContainerCreateTimer holds the metrics timer associated with container creation
+	ContainerCreateTimer = metrics.NewTimer()
+	// ContainerDeleteTimer holds the metrics timer associated with container deletion
+	ContainerDeleteTimer = metrics.NewTimer()
+	// ContainerStartTimer holds the metrics timer associated with container start duration
+	ContainerStartTimer = metrics.NewTimer()
+	// ContainerStatsTimer holds the metrics timer associated with container stats generation
+	ContainerStatsTimer = metrics.NewTimer()
+	// ContainersCounter keeps track of the number of active containers
+	ContainersCounter = metrics.NewCounter()
+	// EventSubscriberCounter keeps track of the number of active event subscribers
 	EventSubscriberCounter = metrics.NewCounter()
-	TasksCounter = metrics.NewCounter()
-	ExecProcessTimer = metrics.NewTimer()
-	ExitProcessTimer = metrics.NewTimer()
-	EpollFdCounter = metrics.NewCounter()
+	// TasksCounter keeps track of the number of active supervisor tasks
+	TasksCounter = metrics.NewCounter()
+	// ExecProcessTimer holds the metrics timer associated with container exec
+	ExecProcessTimer = metrics.NewTimer()
+	// ExitProcessTimer holds the metrics timer associated with reporting container exit status
+	ExitProcessTimer = metrics.NewTimer()
+	// EpollFdCounter keeps trac of how many process are being monitored
+	EpollFdCounter = metrics.NewCounter()
 )
 
+// Metrics return the list of all available metrics
 func Metrics() map[string]interface{} {
 	return map[string]interface{}{
 		"container-create-time": ContainerCreateTimer,
@@ -9,6 +9,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// NewMonitor starts a new process monitor and returns it
 func NewMonitor() (*Monitor, error) {
 	m := &Monitor{
 		receivers: make(map[int]interface{}),
@@ -24,6 +25,7 @@ func NewMonitor() (*Monitor, error) {
 	return m, nil
 }
 
+// Monitor represents a runtime.Process monitor
 type Monitor struct {
 	m sync.Mutex
 	receivers map[int]interface{}
@@ -32,14 +34,17 @@ type Monitor struct {
 	epollFd int
 }
 
+// Exits returns the channel used to notify of a process exit
 func (m *Monitor) Exits() chan runtime.Process {
 	return m.exits
 }
 
+// OOMs returns the channel used to notify of a container exit due to OOM
 func (m *Monitor) OOMs() chan string {
 	return m.ooms
 }
 
+// Monitor adds a process to the list of the one being monitored
 func (m *Monitor) Monitor(p runtime.Process) error {
 	m.m.Lock()
 	defer m.m.Unlock()
@@ -56,6 +61,7 @@ func (m *Monitor) Monitor(p runtime.Process) error {
 	return nil
 }
 
+// MonitorOOM adds a container to the list of the ones monitored for OOM
 func (m *Monitor) MonitorOOM(c runtime.Container) error {
 	m.m.Lock()
 	defer m.m.Unlock()
@@ -76,6 +82,7 @@ func (m *Monitor) MonitorOOM(c runtime.Container) error {
 	return nil
 }
 
+// Close cleans up resources allocated by NewMonitor()
 func (m *Monitor) Close() error {
 	return syscall.Close(m.epollFd)
 }
@@ -6,6 +6,7 @@ import (
 	"github.com/Sirupsen/logrus"
 )
 
+// OOMTask holds needed parameters to report a container OOM
 type OOMTask struct {
 	baseTask
 	ID string
@@ -2,6 +2,7 @@ package supervisor
 
 import "os"
 
+// SignalTask holds needed parameters to signal a container
 type SignalTask struct {
 	baseTask
 	ID string
@@ -6,6 +6,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// StatsTask holds needed parameters to retrieve a container statistics
 type StatsTask struct {
 	baseTask
 	ID string
@@ -144,6 +144,7 @@ func readEventLog(s *Supervisor) error {
 	return nil
 }
 
+// Supervisor represents a container supervisor
 type Supervisor struct {
 	// stateDir is the directory on the system to store container runtime state information.
 	stateDir string
@@ -179,6 +180,7 @@ func (s *Supervisor) Close() error {
 	return nil
 }
 
+// Event represents a container event
 type Event struct {
 	ID string `json:"id"`
 	Type string `json:"type"`
@@ -6,6 +6,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// UpdateTask holds needed parameters to update a container resource constraints
 type UpdateTask struct {
 	baseTask
 	ID string
@@ -50,6 +51,8 @@ func (s *Supervisor) updateContainer(t *UpdateTask) error {
 	return nil
 }
 
+// UpdateProcessTask holds needed parameters to update a container
+// process terminal size or close its stdin
 type UpdateProcessTask struct {
 	baseTask
 	ID string
@@ -8,6 +8,7 @@ import (
 	"github.com/docker/containerd/runtime"
 )
 
+// Worker interface
 type Worker interface {
 	Start()
 }
@@ -22,6 +23,7 @@ type startTask struct {
 	StartResponse chan StartResponse
 }
 
+// NewWorker return a new initialized worker
 func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker {
 	return &worker{
 		s: s,
@@ -34,6 +36,7 @@ type worker struct {
 	s *Supervisor
 }
 
+// Start runs a loop in charge of starting new containers
 func (w *worker) Start() {
 	defer w.wg.Done()
 	for t := range w.s.startTasks {
@@ -8,7 +8,7 @@ import (
 	"strings"
 )
 
-// Output directory for testing and benchmark artifacts
+// GetTestOutDir returns the output directory for testing and benchmark artifacts
 func GetTestOutDir() string {
 	out, _ := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput()
 	repoRoot := string(out)
@@ -17,11 +17,18 @@ func GetTestOutDir() string {
 }
 
 var (
-	ArchivesDir = filepath.Join("test-artifacts", "archives")
-	BundlesRoot = filepath.Join("test-artifacts", "oci-bundles")
+	// ArchivesDir holds the location of the available rootfs
+	ArchivesDir = filepath.Join("test-artifacts", "archives")
+	// BundlesRoot holds the location where OCI Bundles are stored
+	BundlesRoot = filepath.Join("test-artifacts", "oci-bundles")
+	// OutputDirFormat holds the standard format used when creating a
+	// new test output directory
 	OutputDirFormat = filepath.Join("test-artifacts", "runs", "%s")
+	// RefOciSpecsPath holds the path to the generic OCI config
 	RefOciSpecsPath = filepath.Join(BundlesRoot, "config.json")
-	StateDir = "/run/containerd-bench-test"
+	// StateDir holds the path to the directory used by the containerd
+	// started by tests
+	StateDir = "/run/containerd-bench-test"
 )
 
 // untarRootfs untars the given `source` tarPath into `destination/rootfs`
@@ -36,6 +43,7 @@ func untarRootfs(source string, destination string) error {
 	return tar.Run()
 }
 
+// GenerateReferenceSpecs generates a default OCI specs via `runc spec`
 func GenerateReferenceSpecs(destination string) error {
 	if _, err := os.Stat(filepath.Join(destination, "config.json")); err == nil {
 		return nil
@@ -45,6 +53,7 @@ func GenerateReferenceSpecs(destination string) error {
 	return specs.Run()
 }
 
+// CreateBundle generates a valid OCI bundle from the given rootfs
 func CreateBundle(source, name string) error {
 	bundlePath := filepath.Join(BundlesRoot, name)
 
@@ -55,6 +64,7 @@ func CreateBundle(source, name string) error {
 	return nil
 }
 
+// CreateBusyboxBundle generates a bundle based on the busybox rootfs
 func CreateBusyboxBundle(name string) error {
 	return CreateBundle("busybox", name)
 }
@@ -2,10 +2,19 @@ package containerd
 
 import "fmt"
 
+// VersionMajor holds the release major number
 const VersionMajor = 0
 
+// VersionMinor holds the release minor number
 const VersionMinor = 2
 
+// VersionPatch holds the release patch number
 const VersionPatch = 0
 
+// Version holds the combination of major minor and patch as a string
+// of format Major.Minor.Patch
 var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
 
+// GitCommit is filled with the Git revision being used to build the
+// program at linking time
 var GitCommit = ""
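GitCommit is left empty in source; per its comment it is meant to be injected at link time. A hypothetical build invocation using the standard -ldflags -X mechanism (the exact flag syntax depends on the Go version, and the variable path assumes the repository's import path github.com/docker/containerd):

    go build -ldflags "-X github.com/docker/containerd.GitCommit=$(git rev-parse --short HEAD)" ./...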