Initial windows runtime work
Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
parent e5c8c5634a
commit c5843b7615
120 changed files with 11158 additions and 596 deletions
windows/container.go (Normal file, 201 lines)
@@ -0,0 +1,201 @@
// +build windows

package windows

import (
	"encoding/json"
	"sync"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/windows/hcs"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

var (
	ErrLoadedContainer = errors.New("loaded container can only be terminated")
)

type State struct {
	pid    uint32
	status containerd.Status
}

func (s State) Pid() uint32 {
	return s.pid
}

func (s State) Status() containerd.Status {
	return s.status
}

type eventCallback func(id string, evType containerd.EventType, pid, exitStatus uint32)

func loadContainers(ctx context.Context, rootDir string) ([]*container, error) {
	hcs, err := hcs.LoadAll(ctx, owner, rootDir)
	if err != nil {
		return nil, err
	}

	containers := make([]*container, 0)
	for id, h := range hcs {
		containers = append(containers, &container{
			id:     id,
			status: containerd.RunningStatus,
			hcs:    h,
		})
	}

	return containers, nil
}

func newContainer(id, rootDir string, pid uint32, spec RuntimeSpec, io containerd.IO, sendEvent eventCallback) (*container, error) {
	hcs, err := hcs.New(rootDir, owner, id, spec.OCISpec, spec.Configuration, io)
	if err != nil {
		return nil, err
	}

	return &container{
		runtimePid: pid,
		id:         id,
		hcs:        hcs,
		status:     containerd.CreatedStatus,
		ecSync:     make(chan struct{}),
		sendEvent:  sendEvent,
	}, nil
}

type container struct {
	sync.Mutex

	runtimePid uint32
	id         string
	hcs        *hcs.HCS
	status     containerd.Status

	ec        uint32
	ecErr     error
	ecSync    chan struct{}
	sendEvent func(id string, evType containerd.EventType, pid, exitStatus uint32)
}

func (c *container) Info() containerd.ContainerInfo {
	return containerd.ContainerInfo{
		ID:      c.id,
		Runtime: runtimeName,
	}
}

func (c *container) Start(ctx context.Context) error {
	if c.runtimePid == 0 {
		return ErrLoadedContainer
	}

	err := c.hcs.Start(ctx, false)
	if err != nil {
		c.hcs.Terminate(ctx)
		c.sendEvent(c.id, containerd.ExitEvent, c.runtimePid, 255)
		return err
	}

	c.setStatus(containerd.RunningStatus)
	c.sendEvent(c.id, containerd.StartEvent, c.runtimePid, 0)

	// Wait for our process to terminate
	go func() {
		c.ec, c.ecErr = c.hcs.ExitCode(context.Background())
		c.setStatus(containerd.StoppedStatus)
		c.sendEvent(c.id, containerd.ExitEvent, c.runtimePid, c.ec)
		close(c.ecSync)
	}()

	return nil
}

func (c *container) State(ctx context.Context) (containerd.State, error) {
	return &State{
		pid:    c.runtimePid,
		status: c.getStatus(),
	}, nil
}

func (c *container) Kill(ctx context.Context, signal uint32, all bool) error {
	return c.hcs.Terminate(ctx)
}

func (c *container) Exec(ctx context.Context, opts containerd.ExecOpts) (containerd.Process, error) {
	if c.runtimePid == 0 {
		return nil, ErrLoadedContainer
	}

	var procSpec specs.Process
	if err := json.Unmarshal(opts.Spec, &procSpec); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal oci spec")
	}

	p, err := c.hcs.Exec(ctx, procSpec, opts.IO)
	if err != nil {
		return nil, err
	}

	go func() {
		ec, _ := p.ExitCode()
		c.sendEvent(c.id, containerd.ExitEvent, p.Pid(), ec)
	}()

	return &process{p}, nil
}

func (c *container) setStatus(status containerd.Status) {
	c.Lock()
	c.status = status
	c.Unlock()
}

func (c *container) getStatus() containerd.Status {
	c.Lock()
	defer c.Unlock()
	return c.status
}

func (c *container) exitCode(ctx context.Context) (uint32, error) {
	if c.runtimePid == 0 {
		return 255, ErrLoadedContainer
	}

	<-c.ecSync
	return c.ec, c.ecErr
}

func (c *container) remove(ctx context.Context) error {
	return c.hcs.Remove(ctx)
}

func (c *container) getRuntimePid() uint32 {
	return c.runtimePid
}

type process struct {
	p *hcs.Process
}

func (p *process) State(ctx context.Context) (containerd.State, error) {
	return &processState{p.p}, nil
}

func (p *process) Kill(ctx context.Context, sig uint32, all bool) error {
	return p.p.Kill()
}

type processState struct {
	p *hcs.Process
}

func (s *processState) Status() containerd.Status {
	return s.p.Status()
}

func (s *processState) Pid() uint32 {
	return s.p.Pid()
}
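For readers following the exit-code flow above: Start stores the code and error, then closes ecSync, and exitCode blocks on that channel; the close publishes the fields safely to every waiter. A minimal standalone sketch of the same pattern (names here are illustrative, not from the commit):

package main

import "fmt"

// task mirrors the ec/ecErr/ecSync trio used by the container type above.
type task struct {
	ec     uint32
	ecErr  error
	ecSync chan struct{}
}

// start launches the waiter goroutine, as container.Start does.
func (t *task) start(wait func() (uint32, error)) {
	go func() {
		t.ec, t.ecErr = wait()
		close(t.ecSync) // closing publishes ec/ecErr to every blocked reader
	}()
}

// exitCode blocks until start's goroutine has stored the result.
func (t *task) exitCode() (uint32, error) {
	<-t.ecSync
	return t.ec, t.ecErr
}

func main() {
	t := &task{ecSync: make(chan struct{})}
	t.start(func() (uint32, error) { return 7, nil })
	ec, err := t.exitCode()
	fmt.Println(ec, err) // 7 <nil>
}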
windows/hcs/hcs.go (Normal file, 523 lines)
@@ -0,0 +1,523 @@
// +build windows

package hcs

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/log"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
)

const (
	layerFile               = "layer"
	defaultTerminateTimeout = 5 * time.Minute
)

func LoadAll(ctx context.Context, owner, rootDir string) (map[string]*HCS, error) {
	ctrProps, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve running containers")
	}

	containers := make(map[string]*HCS)
	for _, p := range ctrProps {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}

		if p.Owner != owner || p.SystemType != "Container" {
			continue
		}

		// TODO: take the context into account
		container, err := hcsshim.OpenContainer(p.ID)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to open container %s", p.ID)
		}
		stateDir := filepath.Join(rootDir, p.ID)
		b, err := ioutil.ReadFile(filepath.Join(stateDir, layerFile))
		if err != nil {
			log.G(ctx).WithError(err).Warnf("failed to read layer file for container %s", p.ID)
		}
		containers[p.ID] = &HCS{
			id:              p.ID,
			container:       container,
			stateDir:        stateDir,
			layerFolderPath: string(b),
			conf: Configuration{
				TerminateDuration: defaultTerminateTimeout,
			},
		}
	}

	return containers, nil
}

// New creates a new container (but doesn't start it).
func New(rootDir, owner, containerID string, spec specs.Spec, conf Configuration, cio containerd.IO) (*HCS, error) {
	stateDir := filepath.Join(rootDir, containerID)
	if err := os.MkdirAll(stateDir, 0755); err != nil {
		return nil, errors.Wrapf(err, "unable to create container state dir %s", stateDir)
	}

	if conf.TerminateDuration == 0 {
		conf.TerminateDuration = defaultTerminateTimeout
	}

	h := &HCS{
		stateDir: stateDir,
		owner:    owner,
		id:       containerID,
		spec:     spec,
		conf:     conf,
	}

	sio, err := newSIO(cio)
	if err != nil {
		return nil, err
	}
	h.io = sio
	runtime.SetFinalizer(sio, func(s *shimIO) {
		s.Close()
	})

	hcsConf, err := h.newHCSConfiguration()
	if err != nil {
		return nil, err
	}

	ctr, err := hcsshim.CreateContainer(containerID, hcsConf)
	if err != nil {
		removeLayer(context.TODO(), hcsConf.LayerFolderPath)
		return nil, err
	}
	h.container = ctr
	h.layerFolderPath = hcsConf.LayerFolderPath

	return h, nil
}

type HCS struct {
	stateDir        string
	owner           string
	id              string
	spec            specs.Spec
	conf            Configuration
	io              *shimIO
	container       hcsshim.Container
	initProcess     hcsshim.Process
	layerFolderPath string
}

// Start starts the associated container and instantiates the init
// process within it.
func (s *HCS) Start(ctx context.Context, servicing bool) error {
	if s.initProcess != nil {
		return errors.New("init process already started")
	}
	if err := s.container.Start(); err != nil {
		if err := s.Terminate(ctx); err != nil {
			log.G(ctx).WithError(err).Errorf("failed to terminate container %s", s.id)
		}
		return err
	}

	proc, err := s.newProcess(ctx, s.io, s.spec.Process)
	if err != nil {
		s.Terminate(ctx)
		return err
	}

	s.initProcess = proc

	return nil
}

// Pid returns the pid of the container init process.
func (s *HCS) Pid() int {
	return s.initProcess.Pid()
}

// ExitCode waits for the container to exit and returns the exit code
// of the init process.
func (s *HCS) ExitCode(ctx context.Context) (uint32, error) {
	// TODO: handle a context cancellation
	if err := s.initProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to wait for container '%s' init process", s.id)
		}
		// container is probably dead, let's try to get its exit code
	}

	ec, err := s.initProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to get container '%s' init process exit code", s.id)
		}
		// Well, unknown exit code it is
		ec = 255
	}

	return uint32(ec), err
}

// Exec starts a new process within the container.
func (s *HCS) Exec(ctx context.Context, procSpec specs.Process, io containerd.IO) (*Process, error) {
	sio, err := newSIO(io)
	if err != nil {
		return nil, err
	}
	p, err := s.newProcess(ctx, sio, procSpec)
	if err != nil {
		return nil, err
	}

	return &Process{
		containerID: s.id,
		p:           p,
		status:      containerd.RunningStatus,
	}, nil
}

// newProcess creates a new process within a running container. It is
// used to create both the init process and subsequent 'exec'
// processes.
func (s *HCS) newProcess(ctx context.Context, sio *shimIO, procSpec specs.Process) (hcsshim.Process, error) {
	conf := hcsshim.ProcessConfig{
		EmulateConsole:   sio.terminal,
		CreateStdInPipe:  sio.stdin != nil,
		CreateStdOutPipe: sio.stdout != nil,
		CreateStdErrPipe: sio.stderr != nil,
		User:             procSpec.User.Username,
		CommandLine:      strings.Join(procSpec.Args, " "),
		Environment:      ociSpecEnvToHCSEnv(procSpec.Env),
		WorkingDirectory: procSpec.Cwd,
	}
	conf.ConsoleSize[0] = procSpec.ConsoleSize.Height
	conf.ConsoleSize[1] = procSpec.ConsoleSize.Width

	if conf.WorkingDirectory == "" {
		conf.WorkingDirectory = s.spec.Process.Cwd
	}

	proc, err := s.container.CreateProcess(&conf)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create process with conf %#v", conf)
	}
	pid := proc.Pid()

	stdin, stdout, stderr, err := proc.Stdio()
	if err != nil {
		s.Terminate(ctx)
		return nil, err
	}

	if sio.stdin != nil {
		go func() {
			log.G(ctx).WithField("pid", pid).Debug("stdin: copy started")
			io.Copy(stdin, sio.stdin)
			log.G(ctx).WithField("pid", pid).Debug("stdin: copy done")
			stdin.Close()
			sio.stdin.Close()
		}()
	} else {
		proc.CloseStdin()
	}

	if sio.stdout != nil {
		go func() {
			log.G(ctx).WithField("pid", pid).Debug("stdout: copy started")
			io.Copy(sio.stdout, stdout)
			log.G(ctx).WithField("pid", pid).Debug("stdout: copy done")
			stdout.Close()
			sio.stdout.Close()
		}()
	}

	if sio.stderr != nil {
		go func() {
			log.G(ctx).WithField("pid", pid).Debug("stderr: copy started")
			io.Copy(sio.stderr, stderr)
			log.G(ctx).WithField("pid", pid).Debug("stderr: copy done")
			stderr.Close()
			sio.stderr.Close()
		}()
	}

	return proc, nil
}

// Terminate stops a running container.
func (s *HCS) Terminate(ctx context.Context) error {
	err := s.container.Terminate()
	switch {
	case hcsshim.IsPending(err):
		// TODO: take the context into account
		err = s.container.WaitTimeout(s.conf.TerminateDuration)
	case hcsshim.IsAlreadyStopped(err):
		err = nil
	}

	return err
}

func (s *HCS) Shutdown(ctx context.Context) error {
	err := s.container.Shutdown()
	switch {
	case hcsshim.IsPending(err):
		// TODO: take the context into account
		err = s.container.WaitTimeout(s.conf.TerminateDuration)
	case hcsshim.IsAlreadyStopped(err):
		err = nil
	}

	if err != nil {
		log.G(ctx).WithError(err).Debugf("failed to shutdown container %s, calling terminate", s.id)
		return s.Terminate(ctx)
	}

	return nil
}

// Remove starts a servicing container if needed, then cleans up the
// container resources.
func (s *HCS) Remove(ctx context.Context) error {
	defer func() {
		if err := s.Shutdown(ctx); err != nil {
			log.G(ctx).WithError(err).WithField("id", s.id).
				Errorf("failed to shutdown/terminate container")
		}

		if s.initProcess != nil {
			if err := s.initProcess.Close(); err != nil {
				log.G(ctx).WithError(err).WithFields(logrus.Fields{"pid": s.Pid(), "id": s.id}).
					Errorf("failed to clean init process resources")
			}
		}
		if err := s.container.Close(); err != nil {
			log.G(ctx).WithError(err).WithField("id", s.id).Errorf("failed to clean container resources")
		}

		// Cleanup the layer folder
		if err := removeLayer(ctx, s.layerFolderPath); err == nil {
			os.RemoveAll(s.stateDir)
		}
	}()

	if update, err := s.container.HasPendingUpdates(); err != nil || !update {
		return nil
	}

	// TODO: take the context into account
	serviceHCS, err := New(s.stateDir, s.owner, s.id+"_servicing", s.spec, s.conf, containerd.IO{})
	if err != nil {
		log.G(ctx).WithError(err).WithField("id", s.id).Warn("could not create servicing container")
		return nil
	}
	defer serviceHCS.container.Close()

	err = serviceHCS.Start(ctx, true)
	if err != nil {
		if err := serviceHCS.Terminate(ctx); err != nil {
			log.G(ctx).WithError(err).WithField("id", s.id).Errorf("failed to terminate servicing container")
		}
		log.G(ctx).WithError(err).WithField("id", s.id).Errorf("failed to start servicing container")
		return nil
	}

	// wait for the container to exit
	_, err = serviceHCS.ExitCode(ctx)
	if err != nil {
		if err := serviceHCS.Terminate(ctx); err != nil {
			log.G(ctx).WithError(err).WithField("id", s.id).Errorf("failed to terminate servicing container")
		}
		log.G(ctx).WithError(err).WithField("id", s.id).Errorf("failed to get servicing container exit code")
	}

	serviceHCS.container.WaitTimeout(s.conf.TerminateDuration)

	return nil
}

// newHCSConfiguration generates a hcsshim configuration from the instance
// OCI Spec and hcs.Configuration.
func (s *HCS) newHCSConfiguration() (*hcsshim.ContainerConfig, error) {
	configuration := &hcsshim.ContainerConfig{
		SystemType:                 "Container",
		Name:                       s.id,
		Owner:                      s.owner,
		HostName:                   s.spec.Hostname,
		IgnoreFlushesDuringBoot:    s.conf.IgnoreFlushesDuringBoot,
		HvPartition:                s.conf.UseHyperV,
		AllowUnqualifiedDNSQuery:   s.conf.AllowUnqualifiedDNSQuery,
		EndpointList:               s.conf.NetworkEndpoints,
		NetworkSharedContainerName: s.conf.NetworkSharedContainerID,
		Credentials:                s.conf.Credentials,
	}

	// TODO: use the create request Mounts for those
	for _, layerPath := range s.conf.Layers {
		_, filename := filepath.Split(layerPath)
		guid, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   guid.ToString(),
			Path: layerPath,
		})
	}

	if len(s.spec.Mounts) > 0 {
		mds := make([]hcsshim.MappedDir, len(s.spec.Mounts))
		for i, mount := range s.spec.Mounts {
			mds[i] = hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					mds[i].ReadOnly = true
				}
			}
		}
		configuration.MappedDirectories = mds
	}

	if s.conf.DNSSearchList != nil {
		configuration.DNSSearchList = strings.Join(s.conf.DNSSearchList, ",")
	}

	if configuration.HvPartition {
		for _, layerPath := range s.conf.Layers {
			utilityVMPath := filepath.Join(layerPath, "UtilityVM")
			_, err := os.Stat(utilityVMPath)
			if err == nil {
				configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
				break
			} else if !os.IsNotExist(err) {
				return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
			}
		}
	}

	if len(configuration.Layers) == 0 {
		// TODO: support starting with 0 layers, this means we need the "filter" directory as a parameter
		return nil, errors.New("at least one layer must be provided")
	}

	di := hcsshim.DriverInfo{
		Flavour: 1, // filter driver
	}

	if len(configuration.Layers) > 0 {
		di.HomeDir = filepath.Dir(s.conf.Layers[0])
	}

	// Windows doesn't support creating a container with a readonly
	// filesystem, so always create a RW one
	if err := hcsshim.CreateSandboxLayer(di, s.id, s.conf.Layers[0], s.conf.Layers); err != nil {
		return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
			s.id, configuration.Layers, di)
	}

	configuration.LayerFolderPath = filepath.Join(di.HomeDir, s.id)
	if err := ioutil.WriteFile(filepath.Join(s.stateDir, layerFile), []byte(configuration.LayerFolderPath), 0644); err != nil {
		log.L.WithError(err).Warnf("failed to save active layer %s", configuration.LayerFolderPath)
	}

	err := hcsshim.ActivateLayer(di, s.id)
	if err != nil {
		removeLayer(context.TODO(), configuration.LayerFolderPath)
		return nil, errors.Wrapf(err, "failed to activate layer %s", configuration.LayerFolderPath)
	}

	err = hcsshim.PrepareLayer(di, s.id, s.conf.Layers)
	if err != nil {
		removeLayer(context.TODO(), configuration.LayerFolderPath)
		return nil, errors.Wrapf(err, "failed to prepare layer %s", configuration.LayerFolderPath)
	}

	volumePath, err := hcsshim.GetLayerMountPath(di, s.id)
	if err != nil {
		if err := hcsshim.DestroyLayer(di, s.id); err != nil {
			log.L.Warnf("failed to DestroyLayer %s: %s", s.id, err)
		}
		return nil, errors.Wrapf(err, "failed to get mount path for layer %s: driverInfo: %#v", s.id, di)
	}
	configuration.VolumePath = volumePath

	f, err := os.OpenFile(fmt.Sprintf("%s-hcs.json", s.id), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		fmt.Println("failed to create file:", err)
	} else {
		defer f.Close()
		enc := json.NewEncoder(f)
		enc.Encode(configuration)
	}

	return configuration, nil
}

// removeLayer deletes the given layer; all associated containers must
// have been shut down for this to succeed.
func removeLayer(ctx context.Context, path string) error {
	layerID := filepath.Base(path)
	parentPath := filepath.Dir(path)
	di := hcsshim.DriverInfo{
		Flavour: 1, // filter driver
		HomeDir: parentPath,
	}

	err := hcsshim.UnprepareLayer(di, layerID)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
	}

	err = hcsshim.DeactivateLayer(di, layerID)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
	}

	removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
	err = os.Rename(path, removePath)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
		removePath = path
	}
	if err := hcsshim.DestroyLayer(di, removePath); err != nil {
		log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
		return err
	}

	return nil
}

// ociSpecEnvToHCSEnv converts from the OCI Spec ENV format to the one
// expected by HCS.
func ociSpecEnvToHCSEnv(a []string) map[string]string {
	env := make(map[string]string)
	for _, s := range a {
		arr := strings.SplitN(s, "=", 2)
		if len(arr) == 2 {
			env[arr[0]] = arr[1]
		}
	}
	return env
}
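A quick, self-contained check of the ociSpecEnvToHCSEnv behavior above (input values are invented): because SplitN limits the split to two parts, only the first "=" separates key from value, so values containing "=" survive intact, and entries with no "=" are dropped.

package main

import (
	"fmt"
	"strings"
)

// Same logic as ociSpecEnvToHCSEnv above: "KEY=value" pairs become the map
// HCS expects; entries without '=' are silently dropped.
func ociSpecEnvToHCSEnv(a []string) map[string]string {
	env := make(map[string]string)
	for _, s := range a {
		if arr := strings.SplitN(s, "=", 2); len(arr) == 2 {
			env[arr[0]] = arr[1]
		}
	}
	return env
}

func main() {
	env := ociSpecEnvToHCSEnv([]string{`PATH=C:\Windows`, "OPTS=a=b", "MALFORMED"})
	fmt.Println(env["PATH"], env["OPTS"], len(env)) // C:\Windows a=b 2
}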
windows/hcs/process.go (Normal file, 50 lines)
@@ -0,0 +1,50 @@
// +build windows

package hcs

import (
	"syscall"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd"
	"github.com/pkg/errors"
)

type Process struct {
	containerID string
	p           hcsshim.Process
	status      containerd.Status
}

func (h *Process) Pid() uint32 {
	return uint32(h.p.Pid())
}

func (h *Process) Kill() error {
	return h.p.Kill()
}

func (h *Process) ExitCode() (uint32, error) {
	if err := h.p.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to wait for container '%s' process %d", h.containerID, h.p.Pid())
		}
		// container is probably dead, let's try to get its exit code
	}
	h.status = containerd.StoppedStatus

	ec, err := h.p.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to get container '%s' process %d exit code", h.containerID, h.p.Pid())
		}
		// Well, unknown exit code it is
		ec = 255
	}

	return uint32(ec), err
}

func (h *Process) Status() containerd.Status {
	return h.status
}
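The Wait/ExitCode pairing above tolerates exactly one failure mode: a broken pipe, which only means the process is already gone, so the exit code is still worth reading. A standalone sketch of that decision; the proc interface and sentinel error here are stand-ins for hcsshim.Process and syscall.ERROR_BROKEN_PIPE, and errors.Is replaces the hcsshim.ProcessError type assertion:

package main

import (
	"errors"
	"fmt"
)

var errBrokenPipe = errors.New("broken pipe") // stand-in for syscall.ERROR_BROKEN_PIPE

type proc interface {
	Wait() error
	ExitCode() (int, error)
}

// exitCode mirrors Process.ExitCode above: a Wait failure is fatal unless it
// only signals that the process already died; an unreadable code becomes 255.
func exitCode(p proc) (uint32, error) {
	if err := p.Wait(); err != nil && !errors.Is(err, errBrokenPipe) {
		return 255, err
	}
	ec, err := p.ExitCode()
	if err != nil {
		if !errors.Is(err, errBrokenPipe) {
			return 255, err
		}
		ec = 255 // unknown exit code
	}
	return uint32(ec), err
}

// deadProc simulates a process whose pipes broke but whose code is readable.
type deadProc struct{}

func (deadProc) Wait() error            { return errBrokenPipe }
func (deadProc) ExitCode() (int, error) { return 3, nil }

func main() {
	fmt.Println(exitCode(deadProc{})) // 3 <nil>
}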
windows/hcs/shimio.go (Normal file, 77 lines)
@@ -0,0 +1,77 @@
// +build windows

package hcs

import (
	"net"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/containerd/containerd"
	"github.com/pkg/errors"
)

type shimIO struct {
	stdin    net.Conn
	stdout   net.Conn
	stderr   net.Conn
	terminal bool
}

// newSIO connects to the provided pipes
func newSIO(io containerd.IO) (*shimIO, error) {
	var (
		c   net.Conn
		err error
		sio shimIO
	)

	defer func() {
		if err != nil {
			sio.Close()
		}
	}()

	for _, p := range []struct {
		name string
		open bool
		conn *net.Conn
	}{
		{
			name: io.Stdin,
			open: io.Stdin != "",
			conn: &sio.stdin,
		},
		{
			name: io.Stdout,
			open: io.Stdout != "",
			conn: &sio.stdout,
		},
		{
			name: io.Stderr,
			open: !io.Terminal && io.Stderr != "",
			conn: &sio.stderr,
		},
	} {
		if p.open {
			dialTimeout := 3 * time.Second
			c, err = winio.DialPipe(p.name, &dialTimeout)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to connect to %s", p.name)
			}
			*p.conn = c
		}
	}

	return &sio, nil
}

// Close terminates all successfully dialed IO connections
func (s *shimIO) Close() {
	for _, cn := range []net.Conn{s.stdin, s.stdout, s.stderr} {
		if cn != nil {
			cn.Close()
		}
	}
}
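The loop in newSIO is a table-driven wiring pattern: each entry carries the pipe name, whether to open it, and a pointer to the struct field the dialed connection should land in. A runnable sketch of the same shape, with net.Pipe standing in for winio.DialPipe and invented pipe names:

package main

import (
	"fmt"
	"net"
)

type streams struct {
	stdin, stdout, stderr net.Conn
}

func main() {
	// dial is a stand-in for winio.DialPipe; net.Pipe gives an in-memory conn.
	dial := func(name string) (net.Conn, error) {
		c, _ := net.Pipe()
		return c, nil
	}

	var s streams
	for _, p := range []struct {
		name string
		open bool
		conn *net.Conn // pointer to the destination field
	}{
		{"stdin-pipe", true, &s.stdin},
		{"stdout-pipe", true, &s.stdout},
		{"stderr-pipe", false, &s.stderr}, // e.g. terminal mode: stderr stays nil
	} {
		if !p.open {
			continue
		}
		c, err := dial(p.name)
		if err != nil {
			panic(err)
		}
		*p.conn = c
	}

	fmt.Println(s.stdin != nil, s.stdout != nil, s.stderr == nil) // true true true
}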
windows/hcs/types.go (Normal file, 22 lines)
@@ -0,0 +1,22 @@
// +build windows

package hcs

import "time"

type Configuration struct {
	UseHyperV bool `json:"useHyperV,omitempty"`

	Layers []string `json:"layers"`

	TerminateDuration time.Duration `json:"terminateDuration,omitempty"`

	IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"`

	AllowUnqualifiedDNSQuery bool     `json:"allowUnqualifiedDNSQuery,omitempty"`
	DNSSearchList            []string `json:"dnsSearchList,omitempty"`
	NetworkEndpoints         []string `json:"networkEndpoints,omitempty"`
	NetworkSharedContainerID string

	Credentials string `json:"credentials,omitempty"`
}
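One subtlety of the struct above worth illustrating: time.Duration is an int64, so terminateDuration travels as nanoseconds on the wire. A minimal decode of a hypothetical config document (field values are invented; the struct is a trimmed copy of the one above):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed copy of the Configuration struct above, enough for the demo.
type Configuration struct {
	UseHyperV         bool          `json:"useHyperV,omitempty"`
	Layers            []string      `json:"layers"`
	TerminateDuration time.Duration `json:"terminateDuration,omitempty"`
}

func main() {
	data := []byte(`{"useHyperV":true,"layers":["C:\\layers\\base"],"terminateDuration":300000000000}`)
	var c Configuration
	if err := json.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	// 300000000000 ns decodes as 5m0s
	fmt.Println(c.UseHyperV, c.Layers[0], c.TerminateDuration)
}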
windows/runtime.go (Normal file, 211 lines)
@@ -0,0 +1,211 @@
// +build windows

package windows

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/plugin"
	"github.com/containerd/containerd/windows/hcs"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"

	"golang.org/x/net/context"
)

const (
	runtimeName    = "windows"
	owner          = "containerd"
	configFilename = "config.json"
)

// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match golang's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

func init() {
	plugin.Register(runtimeName, &plugin.Registration{
		Type: plugin.RuntimePlugin,
		Init: New,
	})
}

func New(ic *plugin.InitContext) (interface{}, error) {
	c, cancel := context.WithCancel(ic.Context)

	rootDir := filepath.Join(ic.Root, runtimeName)
	if err := os.MkdirAll(rootDir, 0755); err != nil {
		return nil, errors.Wrapf(err, "could not create state directory at %s", rootDir)
	}

	// Terminate all previous containers that we may have started. We
	// don't support restoring containers.
	ctrs, err := loadContainers(ic.Context, rootDir)
	if err != nil {
		return nil, err
	}

	for _, c := range ctrs {
		c.remove(ic.Context)
	}

	// Try to delete the old state dir and recreate it
	stateDir := filepath.Join(ic.State, runtimeName)
	if err := os.RemoveAll(stateDir); err != nil {
		log.G(c).WithError(err).Warnf("failed to cleanup old state directory at %s", stateDir)
	}
	if err := os.MkdirAll(stateDir, 0755); err != nil {
		return nil, errors.Wrapf(err, "could not create state directory at %s", stateDir)
	}

	return &Runtime{
		containers:    make(map[string]*container),
		containersPid: make(map[uint32]struct{}),
		events:        make(chan *containerd.Event, 2048),
		eventsContext: c,
		eventsCancel:  cancel,
		stateDir:      stateDir,
		rootDir:       rootDir,
	}, nil
}

type Runtime struct {
	sync.Mutex

	rootDir  string
	stateDir string

	containers    map[string]*container
	containersPid map[uint32]struct{}
	currentPid    uint32

	events        chan *containerd.Event
	eventsContext context.Context
	eventsCancel  func()
}

type RuntimeSpec struct {
	// OCISpec is the OCI spec
	OCISpec specs.Spec

	// HCS specific options
	hcs.Configuration
}

func (r *Runtime) Create(ctx context.Context, id string, opts containerd.CreateOpts) (containerd.Container, error) {
	var rtSpec RuntimeSpec
	if err := json.Unmarshal(opts.Spec, &rtSpec); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal oci spec")
	}

	pid, err := r.getPid()
	if err != nil {
		return nil, err
	}

	ctr, err := newContainer(id, r.rootDir, pid, rtSpec, opts.IO, func(id string, evType containerd.EventType, pid, exitStatus uint32) {
		r.sendEvent(id, evType, pid, exitStatus)
	})
	if err != nil {
		r.putPid(pid)
		return nil, err
	}

	r.Lock()
	r.containers[id] = ctr
	r.containersPid[pid] = struct{}{}
	r.Unlock()

	r.sendEvent(id, containerd.CreateEvent, pid, 0)

	return ctr, nil
}

func (r *Runtime) Delete(ctx context.Context, c containerd.Container) (uint32, error) {
	wc, ok := c.(*container)
	if !ok {
		return 0, fmt.Errorf("container cannot be cast as *windows.container")
	}
	ec, err := wc.exitCode(ctx)
	if err != nil {
		ec = 255
		log.G(ctx).WithError(err).Errorf("failed to retrieve exit code for container %s", c.Info().ID)
	}

	if err = wc.remove(ctx); err == nil {
		r.Lock()
		delete(r.containers, c.Info().ID)
		r.Unlock()
	}

	r.putPid(wc.getRuntimePid())

	return ec, err
}

func (r *Runtime) Containers() ([]containerd.Container, error) {
	r.Lock()
	list := make([]containerd.Container, 0, len(r.containers))
	for _, c := range r.containers {
		list = append(list, c)
	}
	r.Unlock()

	return list, nil
}

func (r *Runtime) Events(ctx context.Context) <-chan *containerd.Event {
	return r.events
}

func (r *Runtime) sendEvent(id string, evType containerd.EventType, pid, exitStatus uint32) {
	r.events <- &containerd.Event{
		Timestamp:  time.Now(),
		Runtime:    runtimeName,
		Type:       evType,
		Pid:        pid,
		ID:         id,
		ExitStatus: exitStatus,
	}
}

func (r *Runtime) getPid() (uint32, error) {
	r.Lock()
	defer r.Unlock()

	pid := r.currentPid + 1
	for pid != r.currentPid {
		// 0 is reserved and invalid
		if pid == 0 {
			pid = 1
		}
		if _, ok := r.containersPid[pid]; !ok {
			r.currentPid = pid
			return pid, nil
		}
		pid++
	}

	return 0, errors.New("pid pool exhausted")
}

func (r *Runtime) putPid(pid uint32) {
	r.Lock()
	delete(r.containersPid, pid)
	r.Unlock()
}
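The getPid scan above starts one past the last allocation and wraps through the full uint32 range, skipping the reserved pid 0 and failing only after a complete lap finds every pid in use. A compact standalone version of the same allocator (illustrative names; the commit records the pid in the caller, folded into get here for brevity):

package main

import (
	"errors"
	"fmt"
)

type pidPool struct {
	used    map[uint32]struct{}
	current uint32
}

// get mirrors Runtime.getPid: scan forward from the last handed-out pid,
// wrap past zero (reserved), stop after one full lap.
func (p *pidPool) get() (uint32, error) {
	pid := p.current + 1
	for pid != p.current {
		if pid == 0 {
			pid = 1
		}
		if _, ok := p.used[pid]; !ok {
			p.used[pid] = struct{}{}
			p.current = pid
			return pid, nil
		}
		pid++
	}
	return 0, errors.New("pid pool exhausted")
}

func (p *pidPool) put(pid uint32) { delete(p.used, pid) }

func main() {
	pool := &pidPool{used: map[uint32]struct{}{}}
	a, _ := pool.get()
	b, _ := pool.get()
	pool.put(a)
	c, _ := pool.get() // keeps moving forward rather than reusing a immediately
	fmt.Println(a, b, c) // 1 2 3
}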