update vendor

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-09-25 12:27:46 -04:00
parent 19a32db84d
commit 94d1cfbfbf
No known key found for this signature in database
GPG key ID: 18F3685C0022BFF3
10501 changed files with 2307943 additions and 29279 deletions


@ -0,0 +1,86 @@
package container // import "github.com/docker/docker/container"
import (
"os"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
)
// ResolvePath resolves the given path in the container to a resource on the
// host. Returns a resolved path (absolute path to the resource on the host),
// the absolute path to the resource relative to the container's rootfs, and
// an error if the path points to outside the container's rootfs.
func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
if container.BaseFS == nil {
return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil")
}
// Check if a drive letter is supplied; if so, it must be the system drive. This is a no-op except on Windows.
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
if err != nil {
return "", "", err
}
// Consider the given path as an absolute path in the container.
absPath = archive.PreserveTrailingDotOrSeparator(
container.BaseFS.Join(string(container.BaseFS.Separator()), path),
path,
container.BaseFS.Separator())
// Split the absPath into its Directory and Base components. We will
// resolve the dir in the scope of the container then append the base.
dirPath, basePath := container.BaseFS.Split(absPath)
resolvedDirPath, err := container.GetResourcePath(dirPath)
if err != nil {
return "", "", err
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
return resolvedPath, absPath, nil
}
// StatPath stats the resource at the given resolved host path. Locks and mounts should
// be acquired before calling this method and the given path should be fully
// resolved to a path on the host corresponding to the given absolute path
// inside the container.
func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
if container.BaseFS == nil {
return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil")
}
driver := container.BaseFS
lstat, err := driver.Lstat(resolvedPath)
if err != nil {
return nil, err
}
var linkTarget string
if lstat.Mode()&os.ModeSymlink != 0 {
// Fully evaluate the symlink in the scope of the container rootfs.
hostPath, err := container.GetResourcePath(absPath)
if err != nil {
return nil, err
}
linkTarget, err = driver.Rel(driver.Path(), hostPath)
if err != nil {
return nil, err
}
// Make it an absolute path.
linkTarget = driver.Join(string(driver.Separator()), linkTarget)
}
return &types.ContainerPathStat{
Name: driver.Base(absPath),
Size: lstat.Size(),
Mode: lstat.Mode(),
Mtime: lstat.ModTime(),
LinkTarget: linkTarget,
}, nil
}
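// Illustrative sketch (not part of the original file): a hypothetical helper showing
// how ResolvePath and StatPath above are typically combined. Locks and mounts are
// assumed to already be held by the caller, as the doc comments require.
func statContainerPathExample(container *Container, path string) (*types.ContainerPathStat, error) {
	// Resolve the in-container path to a host path scoped to the container's rootfs.
	resolvedPath, absPath, err := container.ResolvePath(path)
	if err != nil {
		return nil, err
	}
	// Stat the resolved host path; symlink targets are reported relative to the rootfs.
	return container.StatPath(resolvedPath, absPath)
}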


@ -0,0 +1,736 @@
package container // import "github.com/docker/docker/container"
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd/cio"
containertypes "github.com/docker/docker/api/types/container"
mounttypes "github.com/docker/docker/api/types/mount"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/container/stream"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/daemon/logger/local"
"github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/restartmanager"
"github.com/docker/docker/volume"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/docker/go-units"
agentexec "github.com/docker/swarmkit/agent/exec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const configFileName = "config.v2.json"
// ExitStatus provides exit reasons for a container.
type ExitStatus struct {
// The exit code with which the container exited.
ExitCode int
// Whether the container encountered an OOM.
OOMKilled bool
// Time at which the container died
ExitedAt time.Time
}
// Container holds the structure defining a container object.
type Container struct {
StreamConfig *stream.Config
// embed for Container to support states directly.
*State `json:"State"` // Needed for Engine API version <= 1.11
Root string `json:"-"` // Path to the "home" of the container, including metadata.
BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount
RWLayer layer.RWLayer `json:"-"`
ID string
Created time.Time
Managed bool
Path string
Args []string
Config *containertypes.Config
ImageID image.ID `json:"Image"`
NetworkSettings *network.Settings
LogPath string
Name string
Driver string
OS string
// MountLabel contains the options for the 'mount' command
MountLabel string
ProcessLabel string
RestartCount int
HasBeenStartedBefore bool
HasBeenManuallyStopped bool // used for unless-stopped restart policy
MountPoints map[string]*volumemounts.MountPoint
HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable
ExecCommands *exec.Store `json:"-"`
DependencyStore agentexec.DependencyGetter `json:"-"`
SecretReferences []*swarmtypes.SecretReference
ConfigReferences []*swarmtypes.ConfigReference
// logDriver for closing
LogDriver logger.Logger `json:"-"`
LogCopier *logger.Copier `json:"-"`
restartManager restartmanager.RestartManager
attachContext *attachContext
// Fields here are specific to Unix platforms
AppArmorProfile string
HostnamePath string
HostsPath string
ShmPath string
ResolvConfPath string
SeccompProfile string
NoNewPrivileges bool
// Fields here are specific to Windows
NetworkSharedContainerID string `json:"-"`
SharedEndpointList []string `json:"-"`
}
// NewBaseContainer creates a new container with its
// basic configuration.
func NewBaseContainer(id, root string) *Container {
return &Container{
ID: id,
State: NewState(),
ExecCommands: exec.NewStore(),
Root: root,
MountPoints: make(map[string]*volumemounts.MountPoint),
StreamConfig: stream.NewConfig(),
attachContext: &attachContext{},
}
}
// FromDisk loads the container configuration stored in the host.
func (container *Container) FromDisk() error {
pth, err := container.ConfigPath()
if err != nil {
return err
}
jsonSource, err := os.Open(pth)
if err != nil {
return err
}
defer jsonSource.Close()
dec := json.NewDecoder(jsonSource)
// Load container settings
if err := dec.Decode(container); err != nil {
return err
}
// Ensure the operating system is set if blank. Assume it is the OS of the
// host if not set, to ensure containers created before multiple-OS
// support are migrated
if container.OS == "" {
container.OS = runtime.GOOS
}
return container.readHostConfig()
}
// toDisk saves the container configuration on disk and returns a deep copy.
func (container *Container) toDisk() (*Container, error) {
var (
buf bytes.Buffer
deepCopy Container
)
pth, err := container.ConfigPath()
if err != nil {
return nil, err
}
// Save container settings
f, err := ioutils.NewAtomicFileWriter(pth, 0600)
if err != nil {
return nil, err
}
defer f.Close()
w := io.MultiWriter(&buf, f)
if err := json.NewEncoder(w).Encode(container); err != nil {
return nil, err
}
if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil {
return nil, err
}
deepCopy.HostConfig, err = container.WriteHostConfig()
if err != nil {
return nil, err
}
return &deepCopy, nil
}
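// Illustrative sketch (not in the original file): the encode-once deep-copy trick used
// by toDisk above and WriteHostConfig below. A single json.Encoder writes to both the
// destination writer and an in-memory buffer, and decoding the buffer back yields a
// deep copy of exactly what was persisted, without a second marshalling pass.
func jsonWriteAndDeepCopyExample(src, dst interface{}, w io.Writer) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(io.MultiWriter(&buf, w)).Encode(src); err != nil {
		return err
	}
	return json.NewDecoder(&buf).Decode(dst)
}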
// CheckpointTo makes the Container's current state visible to queries, and persists state.
// Callers must hold a Container lock.
func (container *Container) CheckpointTo(store ViewDB) error {
deepCopy, err := container.toDisk()
if err != nil {
return err
}
return store.Save(deepCopy)
}
// readHostConfig reads the host configuration from disk for the container.
func (container *Container) readHostConfig() error {
container.HostConfig = &containertypes.HostConfig{}
// If the hostconfig file does not exist, do not read it.
// (We still have to initialize container.HostConfig,
// but that's OK, since we just did that above.)
pth, err := container.HostConfigPath()
if err != nil {
return err
}
f, err := os.Open(pth)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer f.Close()
if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil {
return err
}
container.InitDNSHostConfig()
return nil
}
// WriteHostConfig saves the host configuration on disk for the container,
// and returns a deep copy of the saved object. Callers must hold a Container lock.
func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) {
var (
buf bytes.Buffer
deepCopy containertypes.HostConfig
)
pth, err := container.HostConfigPath()
if err != nil {
return nil, err
}
f, err := ioutils.NewAtomicFileWriter(pth, 0644)
if err != nil {
return nil, err
}
defer f.Close()
w := io.MultiWriter(&buf, f)
if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil {
return nil, err
}
if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil {
return nil, err
}
return &deepCopy, nil
}
// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir
func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) error {
// TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting.
// We will need to do remote filesystem operations here.
if container.OS != runtime.GOOS {
return nil
}
if container.Config.WorkingDir == "" {
return nil
}
container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
pth, err := container.GetResourcePath(container.Config.WorkingDir)
if err != nil {
return err
}
if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIdentity); err != nil {
pthInfo, err2 := os.Stat(pth)
if err2 == nil && pthInfo != nil && !pthInfo.IsDir() {
return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
}
return err
}
return nil
}
// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path
// sanitisation. Symlinks are all scoped to the BaseFS of the container, as
// though the container's BaseFS was `/`.
//
// The BaseFS of a container is the host-facing path which is bind-mounted as
// `/` inside the container. This method is essentially used to access a
// particular path inside the container as though you were a process in that
// container.
//
// NOTE: The returned path is *only* safely scoped inside the container's BaseFS
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetResourcePath(path string) (string, error) {
if container.BaseFS == nil {
return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil")
}
// IMPORTANT - These are paths on the OS where the daemon is running, hence
// any filepath operations must be done in an OS agnostic way.
r, e := container.BaseFS.ResolveScopedPath(path, false)
// Log this here on the daemon side as there's otherwise no indication apart
// from the error being propagated all the way back to the client. This makes
// debugging significantly easier and clearly indicates the error comes from the daemon.
if e != nil {
logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e)
}
return r, e
}
// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path
// sanitisation. Symlinks are all scoped to the root of the container, as
// though the container's root was `/`.
//
// The root of a container is the host-facing configuration metadata directory.
// Only use this method to safely access the container's `container.json` or
// other metadata files. If in doubt, use container.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the container's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetRootResourcePath(path string) (string, error) {
// IMPORTANT - These are paths on the OS where the daemon is running, hence
// any filepath operations must be done in an OS agnostic way.
cleanPath := filepath.Join(string(os.PathSeparator), path)
return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root)
}
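// Illustrative sketch (not in the original file): how the two scoped-path helpers above
// are meant to be used. Metadata files that live under container.Root go through
// GetRootResourcePath, while paths inside the container's rootfs go through
// GetResourcePath; both keep symlink resolution confined to their respective scope.
func scopedPathExample(container *Container) error {
	// Resolve the host path of the container's hostconfig.json metadata file.
	if _, err := container.GetRootResourcePath("hostconfig.json"); err != nil {
		return err
	}
	// Resolve a path inside the container's rootfs, e.g. /etc/hosts.
	_, err := container.GetResourcePath("/etc/hosts")
	return err
}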
// ExitOnNext signals to the monitor that it should not restart the container
// after we send the kill signal.
func (container *Container) ExitOnNext() {
container.RestartManager().Cancel()
}
// HostConfigPath returns the path to the container's JSON hostconfig
func (container *Container) HostConfigPath() (string, error) {
return container.GetRootResourcePath("hostconfig.json")
}
// ConfigPath returns the path to the container's JSON config
func (container *Container) ConfigPath() (string, error) {
return container.GetRootResourcePath(configFileName)
}
// CheckpointDir returns the directory checkpoints are stored in
func (container *Container) CheckpointDir() string {
return filepath.Join(container.Root, "checkpoints")
}
// StartLogger starts a new logger driver for the container.
func (container *Container) StartLogger() (logger.Logger, error) {
cfg := container.HostConfig.LogConfig
initDriver, err := logger.GetLogDriver(cfg.Type)
if err != nil {
return nil, errors.Wrap(err, "failed to get logging factory")
}
info := logger.Info{
Config: cfg.Config,
ContainerID: container.ID,
ContainerName: container.Name,
ContainerEntrypoint: container.Path,
ContainerArgs: container.Args,
ContainerImageID: container.ImageID.String(),
ContainerImageName: container.Config.Image,
ContainerCreated: container.Created,
ContainerEnv: container.Config.Env,
ContainerLabels: container.Config.Labels,
DaemonName: "docker",
}
// Set the log file path for the "json-file" driver
// TODO(@cpuguy83): Setup here based on log driver is a little weird.
switch cfg.Type {
case jsonfilelog.Name:
info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
if err != nil {
return nil, err
}
container.LogPath = info.LogPath
case local.Name:
// Do not set container.LogPath for the local driver
// This would expose the value to the API, which should not be done as it means
// that the log file implementation would become a stable API that cannot change.
logDir, err := container.GetRootResourcePath("local-logs")
if err != nil {
return nil, err
}
if err := os.MkdirAll(logDir, 0700); err != nil {
return nil, errdefs.System(errors.Wrap(err, "error creating local logs dir"))
}
info.LogPath = filepath.Join(logDir, "container.log")
}
l, err := initDriver(info)
if err != nil {
return nil, err
}
if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock {
bufferSize := int64(-1)
if s, exists := cfg.Config["max-buffer-size"]; exists {
bufferSize, err = units.RAMInBytes(s)
if err != nil {
return nil, err
}
}
l = logger.NewRingLogger(l, info, bufferSize)
}
return l, nil
}
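// Illustrative sketch (not in the original file): a log configuration that exercises the
// non-blocking branch of StartLogger above. "mode" and "max-buffer-size" are the config
// keys the function reads; the "4m" buffer size is only an example value.
var exampleNonBlockingLogConfig = containertypes.LogConfig{
	Type: jsonfilelog.Name,
	Config: map[string]string{
		"mode":            string(containertypes.LogModeNonBlock),
		"max-buffer-size": "4m", // parsed with units.RAMInBytes
	},
}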
// GetProcessLabel returns the process label for the container.
func (container *Container) GetProcessLabel() string {
// Even if we have a process label, return "" if we are running
// in privileged mode.
if container.HostConfig.Privileged {
return ""
}
return container.ProcessLabel
}
// GetMountLabel returns the mounting label for the container.
// This label is empty if the container is privileged.
func (container *Container) GetMountLabel() string {
return container.MountLabel
}
// GetExecIDs returns the list of exec commands running on the container.
func (container *Container) GetExecIDs() []string {
return container.ExecCommands.List()
}
// ShouldRestart decides whether the daemon should restart the container or not.
// This is based on the container's restart policy.
func (container *Container) ShouldRestart() bool {
shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt))
return shouldRestart
}
// AddMountPointWithVolume adds a new mount point configured with a volume to the container.
func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
operatingSystem := container.OS
if operatingSystem == "" {
operatingSystem = runtime.GOOS
}
volumeParser := volumemounts.NewParser(operatingSystem)
container.MountPoints[destination] = &volumemounts.MountPoint{
Type: mounttypes.TypeVolume,
Name: vol.Name(),
Driver: vol.DriverName(),
Destination: destination,
RW: rw,
Volume: vol,
CopyData: volumeParser.DefaultCopyMode(),
}
}
// UnmountVolumes unmounts all volumes
func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error {
var errors []string
for _, volumeMount := range container.MountPoints {
if volumeMount.Volume == nil {
continue
}
if err := volumeMount.Cleanup(); err != nil {
errors = append(errors, err.Error())
continue
}
attributes := map[string]string{
"driver": volumeMount.Volume.DriverName(),
"container": container.ID,
}
volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes)
}
if len(errors) > 0 {
return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; "))
}
return nil
}
// IsDestinationMounted checks whether a path is mounted on the container or not.
func (container *Container) IsDestinationMounted(destination string) bool {
return container.MountPoints[destination] != nil
}
// StopSignal returns the signal used to stop the container.
func (container *Container) StopSignal() int {
var stopSignal syscall.Signal
if container.Config.StopSignal != "" {
stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
}
if int(stopSignal) == 0 {
stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
}
return int(stopSignal)
}
// StopTimeout returns the timeout (in seconds) used to stop the container.
func (container *Container) StopTimeout() int {
if container.Config.StopTimeout != nil {
return *container.Config.StopTimeout
}
return DefaultStopTimeout
}
// InitDNSHostConfig ensures that the DNS fields are never nil.
// New containers never have those fields nil,
// but pre-created containers can still have nil values in them.
// The non-recommended host configuration in the start API can
// make these fields nil again; this corrects that issue until
// we remove that behavior for good.
// See https://github.com/docker/docker/pull/17779
// for a more detailed explanation on why we don't want that.
func (container *Container) InitDNSHostConfig() {
container.Lock()
defer container.Unlock()
if container.HostConfig.DNS == nil {
container.HostConfig.DNS = make([]string, 0)
}
if container.HostConfig.DNSSearch == nil {
container.HostConfig.DNSSearch = make([]string, 0)
}
if container.HostConfig.DNSOptions == nil {
container.HostConfig.DNSOptions = make([]string, 0)
}
}
// UpdateMonitor updates monitor configure for running container
func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) {
type policySetter interface {
SetPolicy(containertypes.RestartPolicy)
}
if rm, ok := container.RestartManager().(policySetter); ok {
rm.SetPolicy(restartPolicy)
}
}
// FullHostname returns hostname and optional domain appended to it.
func (container *Container) FullHostname() string {
fullHostname := container.Config.Hostname
if container.Config.Domainname != "" {
fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
}
return fullHostname
}
// RestartManager returns the current restartmanager instance connected to container.
func (container *Container) RestartManager() restartmanager.RestartManager {
if container.restartManager == nil {
container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount)
}
return container.restartManager
}
// ResetRestartManager initializes new restartmanager based on container config
func (container *Container) ResetRestartManager(resetCount bool) {
if container.restartManager != nil {
container.restartManager.Cancel()
}
if resetCount {
container.RestartCount = 0
}
container.restartManager = nil
}
type attachContext struct {
ctx context.Context
cancel context.CancelFunc
mu sync.Mutex
}
// InitAttachContext initializes or returns existing context for attach calls to
// track container liveness.
func (container *Container) InitAttachContext() context.Context {
container.attachContext.mu.Lock()
defer container.attachContext.mu.Unlock()
if container.attachContext.ctx == nil {
container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background())
}
return container.attachContext.ctx
}
// CancelAttachContext cancels attach context. All attach calls should detach
// after this call.
func (container *Container) CancelAttachContext() {
container.attachContext.mu.Lock()
if container.attachContext.ctx != nil {
container.attachContext.cancel()
container.attachContext.ctx = nil
}
container.attachContext.mu.Unlock()
}
func (container *Container) startLogging() error {
if container.HostConfig.LogConfig.Type == "none" {
return nil // do not start logging routines
}
l, err := container.StartLogger()
if err != nil {
return fmt.Errorf("failed to initialize logging driver: %v", err)
}
copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
container.LogCopier = copier
copier.Run()
container.LogDriver = l
return nil
}
// StdinPipe gets the stdin stream of the container
func (container *Container) StdinPipe() io.WriteCloser {
return container.StreamConfig.StdinPipe()
}
// StdoutPipe gets the stdout stream of the container
func (container *Container) StdoutPipe() io.ReadCloser {
return container.StreamConfig.StdoutPipe()
}
// StderrPipe gets the stderr stream of the container
func (container *Container) StderrPipe() io.ReadCloser {
return container.StreamConfig.StderrPipe()
}
// CloseStreams closes the container's stdio streams
func (container *Container) CloseStreams() error {
return container.StreamConfig.CloseStreams()
}
// InitializeStdio is called by libcontainerd to connect the stdio.
func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
if err := container.startLogging(); err != nil {
container.Reset(false)
return nil, err
}
container.StreamConfig.CopyToPipe(iop)
if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
if iop.Stdin != nil {
if err := iop.Stdin.Close(); err != nil {
logrus.Warnf("error closing stdin: %+v", err)
}
}
}
return &rio{IO: iop, sc: container.StreamConfig}, nil
}
// MountsResourcePath returns the path where mounts are stored for the given mount
func (container *Container) MountsResourcePath(mount string) (string, error) {
return container.GetRootResourcePath(filepath.Join("mounts", mount))
}
// SecretMountPath returns the path of the secret mount for the container
func (container *Container) SecretMountPath() (string, error) {
return container.MountsResourcePath("secrets")
}
// SecretFilePath returns the path to the location of a secret on the host.
func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) (string, error) {
secrets, err := container.SecretMountPath()
if err != nil {
return "", err
}
return filepath.Join(secrets, secretRef.SecretID), nil
}
func getSecretTargetPath(r *swarmtypes.SecretReference) string {
if filepath.IsAbs(r.File.Name) {
return r.File.Name
}
return filepath.Join(containerSecretMountPath, r.File.Name)
}
// CreateDaemonEnvironment creates a new environment variable slice for this container.
func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string {
// Setup environment
os := container.OS
if os == "" {
os = runtime.GOOS
}
env := []string{}
if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") {
env = []string{
"PATH=" + system.DefaultPathEnv(os),
"HOSTNAME=" + container.Config.Hostname,
}
if tty {
env = append(env, "TERM=xterm")
}
env = append(env, linkedEnv...)
}
// because the env on the container can override certain default values
// we need to replace the 'env' keys where they match and append anything
// else.
env = ReplaceOrAppendEnvValues(env, container.Config.Env)
return env
}
type rio struct {
cio.IO
sc *stream.Config
}
func (i *rio) Close() error {
i.IO.Close()
return i.sc.CloseStreams()
}
func (i *rio) Wait() {
i.sc.Wait()
i.IO.Wait()
}


@ -0,0 +1,126 @@
package container // import "github.com/docker/docker/container"
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/api/types/container"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/pkg/signal"
"gotest.tools/assert"
)
func TestContainerStopSignal(t *testing.T) {
c := &Container{
Config: &container.Config{},
}
def, err := signal.ParseSignal(signal.DefaultStopSignal)
if err != nil {
t.Fatal(err)
}
s := c.StopSignal()
if s != int(def) {
t.Fatalf("Expected %v, got %v", def, s)
}
c = &Container{
Config: &container.Config{StopSignal: "SIGKILL"},
}
s = c.StopSignal()
if s != 9 {
t.Fatalf("Expected 9, got %v", s)
}
}
func TestContainerStopTimeout(t *testing.T) {
c := &Container{
Config: &container.Config{},
}
s := c.StopTimeout()
if s != DefaultStopTimeout {
t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
}
stopTimeout := 15
c = &Container{
Config: &container.Config{StopTimeout: &stopTimeout},
}
s = c.StopTimeout()
if s != stopTimeout {
t.Fatalf("Expected %v, got %v", stopTimeout, s)
}
}
func TestContainerSecretReferenceDestTarget(t *testing.T) {
ref := &swarmtypes.SecretReference{
File: &swarmtypes.SecretReferenceFileTarget{
Name: "app",
},
}
d := getSecretTargetPath(ref)
expected := filepath.Join(containerSecretMountPath, "app")
if d != expected {
t.Fatalf("expected secret dest %q; received %q", expected, d)
}
}
func TestContainerLogPathSetForJSONFileLogger(t *testing.T) {
containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForJSONFileLogger")
assert.NilError(t, err)
defer os.RemoveAll(containerRoot)
c := &Container{
Config: &container.Config{},
HostConfig: &container.HostConfig{
LogConfig: container.LogConfig{
Type: jsonfilelog.Name,
},
},
ID: "TestContainerLogPathSetForJSONFileLogger",
Root: containerRoot,
}
logger, err := c.StartLogger()
assert.NilError(t, err)
defer logger.Close()
expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID)))
assert.NilError(t, err)
assert.Equal(t, c.LogPath, expectedLogPath)
}
func TestContainerLogPathSetForRingLogger(t *testing.T) {
containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForRingLogger")
assert.NilError(t, err)
defer os.RemoveAll(containerRoot)
c := &Container{
Config: &container.Config{},
HostConfig: &container.HostConfig{
LogConfig: container.LogConfig{
Type: jsonfilelog.Name,
Config: map[string]string{
"mode": string(container.LogModeNonBlock),
},
},
},
ID: "TestContainerLogPathSetForRingLogger",
Root: containerRoot,
}
logger, err := c.StartLogger()
assert.NilError(t, err)
defer logger.Close()
expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID)))
assert.NilError(t, err)
assert.Equal(t, c.LogPath, expectedLogPath)
}


@ -0,0 +1,463 @@
// +build !windows
package container // import "github.com/docker/docker/container"
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
mounttypes "github.com/docker/docker/api/types/mount"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/volume"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
// DefaultStopTimeout sets the default time, in seconds, to wait
// for the graceful container stop before forcefully terminating it.
DefaultStopTimeout = 10
containerSecretMountPath = "/run/secrets"
)
// TrySetNetworkMount attempts to set the network mounts given a provided destination and
// the path to use for it; it returns true if the given destination was a network mount file
func (container *Container) TrySetNetworkMount(destination string, path string) bool {
if destination == "/etc/resolv.conf" {
container.ResolvConfPath = path
return true
}
if destination == "/etc/hostname" {
container.HostnamePath = path
return true
}
if destination == "/etc/hosts" {
container.HostsPath = path
return true
}
return false
}
// BuildHostnameFile writes the container's hostname file.
func (container *Container) BuildHostnameFile() error {
hostnamePath, err := container.GetRootResourcePath("hostname")
if err != nil {
return err
}
container.HostnamePath = hostnamePath
return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
}
// NetworkMounts returns the list of network mounts.
func (container *Container) NetworkMounts() []Mount {
var mounts []Mount
shared := container.HostConfig.NetworkMode.IsContainer()
parser := volumemounts.NewParser(container.OS)
if container.ResolvConfPath != "" {
if _, err := os.Stat(container.ResolvConfPath); err != nil {
logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
} else {
writable := !container.HostConfig.ReadonlyRootfs
if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
writable = m.RW
} else {
label.Relabel(container.ResolvConfPath, container.MountLabel, shared)
}
mounts = append(mounts, Mount{
Source: container.ResolvConfPath,
Destination: "/etc/resolv.conf",
Writable: writable,
Propagation: string(parser.DefaultPropagationMode()),
})
}
}
if container.HostnamePath != "" {
if _, err := os.Stat(container.HostnamePath); err != nil {
logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
} else {
writable := !container.HostConfig.ReadonlyRootfs
if m, exists := container.MountPoints["/etc/hostname"]; exists {
writable = m.RW
} else {
label.Relabel(container.HostnamePath, container.MountLabel, shared)
}
mounts = append(mounts, Mount{
Source: container.HostnamePath,
Destination: "/etc/hostname",
Writable: writable,
Propagation: string(parser.DefaultPropagationMode()),
})
}
}
if container.HostsPath != "" {
if _, err := os.Stat(container.HostsPath); err != nil {
logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
} else {
writable := !container.HostConfig.ReadonlyRootfs
if m, exists := container.MountPoints["/etc/hosts"]; exists {
writable = m.RW
} else {
label.Relabel(container.HostsPath, container.MountLabel, shared)
}
mounts = append(mounts, Mount{
Source: container.HostsPath,
Destination: "/etc/hosts",
Writable: writable,
Propagation: string(parser.DefaultPropagationMode()),
})
}
}
return mounts
}
// CopyImagePathContent copies files in destination to the volume.
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
rootfs, err := container.GetResourcePath(destination)
if err != nil {
return err
}
if _, err := os.Stat(rootfs); err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
id := stringid.GenerateNonCryptoID()
path, err := v.Mount(id)
if err != nil {
return err
}
defer func() {
if err := v.Unmount(id); err != nil {
logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
}
}()
if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP {
return err
}
return copyExistingContents(rootfs, path)
}
// ShmResourcePath returns path to shm
func (container *Container) ShmResourcePath() (string, error) {
return container.MountsResourcePath("shm")
}
// HasMountFor checks if path is a mountpoint
func (container *Container) HasMountFor(path string) bool {
_, exists := container.MountPoints[path]
if exists {
return true
}
// Also search among the tmpfs mounts
for dest := range container.HostConfig.Tmpfs {
if dest == path {
return true
}
}
return false
}
// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted
func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
if container.HasMountFor("/dev/shm") {
return nil
}
// container.ShmPath should not be used here as it may point
// to the host's or another container's /dev/shm
shmPath, err := container.ShmResourcePath()
if err != nil {
return err
}
if shmPath == "" {
return nil
}
if err = unmount(shmPath); err != nil && !os.IsNotExist(err) {
if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil {
return errors.Wrapf(err, "umount %s", shmPath)
}
}
return nil
}
// IpcMounts returns the list of IPC mounts
func (container *Container) IpcMounts() []Mount {
var mounts []Mount
parser := volumemounts.NewParser(container.OS)
if container.HasMountFor("/dev/shm") {
return mounts
}
if container.ShmPath == "" {
return mounts
}
label.SetFileLabel(container.ShmPath, container.MountLabel)
mounts = append(mounts, Mount{
Source: container.ShmPath,
Destination: "/dev/shm",
Writable: true,
Propagation: string(parser.DefaultPropagationMode()),
})
return mounts
}
// SecretMounts returns the mounts for the secret path.
func (container *Container) SecretMounts() ([]Mount, error) {
var mounts []Mount
for _, r := range container.SecretReferences {
if r.File == nil {
continue
}
src, err := container.SecretFilePath(*r)
if err != nil {
return nil, err
}
mounts = append(mounts, Mount{
Source: src,
Destination: getSecretTargetPath(r),
Writable: false,
})
}
for _, r := range container.ConfigReferences {
fPath, err := container.ConfigFilePath(*r)
if err != nil {
return nil, err
}
mounts = append(mounts, Mount{
Source: fPath,
Destination: r.File.Name,
Writable: false,
})
}
return mounts, nil
}
// UnmountSecrets unmounts the local tmpfs for secrets
func (container *Container) UnmountSecrets() error {
p, err := container.SecretMountPath()
if err != nil {
return err
}
if _, err := os.Stat(p); err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return mount.RecursiveUnmount(p)
}
type conflictingUpdateOptions string
func (e conflictingUpdateOptions) Error() string {
return string(e)
}
func (e conflictingUpdateOptions) Conflict() {}
// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
// update resources of container
resources := hostConfig.Resources
cResources := &container.HostConfig.Resources
// validate NanoCPUs, CPUPeriod, and CPUQuota
// Because NanoCPU effectively updates CPUPeriod/CPUQuota,
// once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa.
// In the following we make sure the intended update (resources) does not conflict with the existing settings (cResources).
if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {
return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set")
}
if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 {
return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set")
}
if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 {
return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set")
}
if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 {
return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set")
}
if resources.BlkioWeight != 0 {
cResources.BlkioWeight = resources.BlkioWeight
}
if resources.CPUShares != 0 {
cResources.CPUShares = resources.CPUShares
}
if resources.NanoCPUs != 0 {
cResources.NanoCPUs = resources.NanoCPUs
}
if resources.CPUPeriod != 0 {
cResources.CPUPeriod = resources.CPUPeriod
}
if resources.CPUQuota != 0 {
cResources.CPUQuota = resources.CPUQuota
}
if resources.CpusetCpus != "" {
cResources.CpusetCpus = resources.CpusetCpus
}
if resources.CpusetMems != "" {
cResources.CpusetMems = resources.CpusetMems
}
if resources.Memory != 0 {
// If the new memory limit is larger than the already-set memoryswap limit and the
// update does not also change the memoryswap limit, error out.
if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
}
cResources.Memory = resources.Memory
}
if resources.MemorySwap != 0 {
cResources.MemorySwap = resources.MemorySwap
}
if resources.MemoryReservation != 0 {
cResources.MemoryReservation = resources.MemoryReservation
}
if resources.KernelMemory != 0 {
cResources.KernelMemory = resources.KernelMemory
}
if resources.CPURealtimePeriod != 0 {
cResources.CPURealtimePeriod = resources.CPURealtimePeriod
}
if resources.CPURealtimeRuntime != 0 {
cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
}
// update HostConfig of container
if hostConfig.RestartPolicy.Name != "" {
if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container")
}
container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
}
return nil
}
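// Illustrative sketch (not in the original file): the NanoCPUs vs CPUPeriod/CPUQuota
// conflict described above. If the container was created with CPUPeriod set, an update
// that tries to switch to NanoCPUs is rejected with a conflictingUpdateOptions error.
func updateConflictExample(container *Container) error {
	update := &containertypes.HostConfig{}
	update.Resources.NanoCPUs = 2e9 // request "2 CPUs" expressed in nano-CPUs
	// Fails when container.HostConfig.Resources.CPUPeriod is already > 0.
	return container.UpdateContainer(update)
}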
// DetachAndUnmount uses a detached mount on all mount destinations, then
// unmounts each volume normally.
// This is used from daemon/archive for `docker cp`
func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
networkMounts := container.NetworkMounts()
mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
for _, mntPoint := range container.MountPoints {
dest, err := container.GetResourcePath(mntPoint.Destination)
if err != nil {
logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
continue
}
mountPaths = append(mountPaths, dest)
}
for _, m := range networkMounts {
dest, err := container.GetResourcePath(m.Destination)
if err != nil {
logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
continue
}
mountPaths = append(mountPaths, dest)
}
for _, mountPath := range mountPaths {
if err := mount.Unmount(mountPath); err != nil {
logrus.Warnf("%s unmountVolumes: Failed to do lazy umount fo volume '%s': %v", container.ID, mountPath, err)
}
}
return container.UnmountVolumes(volumeEventLog)
}
// copyExistingContents copies from the source to the destination and
// ensures the ownership is appropriately set.
func copyExistingContents(source, destination string) error {
dstList, err := ioutil.ReadDir(destination)
if err != nil {
return err
}
if len(dstList) != 0 {
// destination is not empty, do not copy
return nil
}
return fs.CopyDir(destination, source)
}
// TmpfsMounts returns the list of tmpfs mounts
func (container *Container) TmpfsMounts() ([]Mount, error) {
parser := volumemounts.NewParser(container.OS)
var mounts []Mount
for dest, data := range container.HostConfig.Tmpfs {
mounts = append(mounts, Mount{
Source: "tmpfs",
Destination: dest,
Data: data,
})
}
for dest, mnt := range container.MountPoints {
if mnt.Type == mounttypes.TypeTmpfs {
data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
if err != nil {
return nil, err
}
mounts = append(mounts, Mount{
Source: "tmpfs",
Destination: dest,
Data: data,
})
}
}
return mounts, nil
}
// EnableServiceDiscoveryOnDefaultNetwork reports whether service discovery should be enabled on the default network.
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
return false
}
// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
func (container *Container) GetMountPoints() []types.MountPoint {
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
for _, m := range container.MountPoints {
mountPoints = append(mountPoints, types.MountPoint{
Type: m.Type,
Name: m.Name,
Source: m.Path(),
Destination: m.Destination,
Driver: m.Driver,
Mode: m.Mode,
RW: m.RW,
Propagation: m.Propagation,
})
}
return mountPoints
}
// ConfigFilePath returns the path to the on-disk location of a config.
// On unix, configs are always considered secret
func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) {
mounts, err := container.SecretMountPath()
if err != nil {
return "", err
}
return filepath.Join(mounts, configRef.ConfigID), nil
}


@ -0,0 +1,213 @@
package container // import "github.com/docker/docker/container"
import (
"fmt"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/pkg/system"
)
const (
containerSecretMountPath = `C:\ProgramData\Docker\secrets`
containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets`
containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs`
// DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container
DefaultStopTimeout = 30
)
// UnmountIpcMount unmounts Ipc related mounts.
// This is a NOOP on windows.
func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
return nil
}
// IpcMounts returns the list of Ipc related mounts.
func (container *Container) IpcMounts() []Mount {
return nil
}
// CreateSecretSymlinks creates symlinks to files in the secret mount.
func (container *Container) CreateSecretSymlinks() error {
for _, r := range container.SecretReferences {
if r.File == nil {
continue
}
resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r))
if err != nil {
return err
}
if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil {
return err
}
if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil {
return err
}
}
return nil
}
// SecretMounts returns the mount for the secret path.
// All secrets are stored in a single mount on Windows. Target symlinks are
// created for each secret, pointing to the files in this mount.
func (container *Container) SecretMounts() ([]Mount, error) {
var mounts []Mount
if len(container.SecretReferences) > 0 {
src, err := container.SecretMountPath()
if err != nil {
return nil, err
}
mounts = append(mounts, Mount{
Source: src,
Destination: containerInternalSecretMountPath,
Writable: false,
})
}
return mounts, nil
}
// UnmountSecrets unmounts the fs for secrets
func (container *Container) UnmountSecrets() error {
p, err := container.SecretMountPath()
if err != nil {
return err
}
return os.RemoveAll(p)
}
// CreateConfigSymlinks creates symlinks to files in the config mount.
func (container *Container) CreateConfigSymlinks() error {
for _, configRef := range container.ConfigReferences {
if configRef.File == nil {
continue
}
resolvedPath, _, err := container.ResolvePath(configRef.File.Name)
if err != nil {
return err
}
if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil {
return err
}
if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil {
return err
}
}
return nil
}
// ConfigMounts returns the mount for configs.
// TODO: Right now Windows doesn't really have a "secure" storage for secrets,
// however some configs may contain secrets. Once secure storage is worked out,
// configs and secret handling should be merged.
func (container *Container) ConfigMounts() []Mount {
var mounts []Mount
if len(container.ConfigReferences) > 0 {
mounts = append(mounts, Mount{
Source: container.ConfigsDirPath(),
Destination: containerInternalConfigsDirPath,
Writable: false,
})
}
return mounts
}
// DetachAndUnmount unmounts all volumes.
// On Windows it only delegates to `UnmountVolumes` since there is nothing to
// force unmount.
func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
return container.UnmountVolumes(volumeEventLog)
}
// TmpfsMounts returns the list of tmpfs mounts
func (container *Container) TmpfsMounts() ([]Mount, error) {
var mounts []Mount
return mounts, nil
}
// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
resources := hostConfig.Resources
if resources.CPUShares != 0 ||
resources.Memory != 0 ||
resources.NanoCPUs != 0 ||
resources.CgroupParent != "" ||
resources.BlkioWeight != 0 ||
len(resources.BlkioWeightDevice) != 0 ||
len(resources.BlkioDeviceReadBps) != 0 ||
len(resources.BlkioDeviceWriteBps) != 0 ||
len(resources.BlkioDeviceReadIOps) != 0 ||
len(resources.BlkioDeviceWriteIOps) != 0 ||
resources.CPUPeriod != 0 ||
resources.CPUQuota != 0 ||
resources.CPURealtimePeriod != 0 ||
resources.CPURealtimeRuntime != 0 ||
resources.CpusetCpus != "" ||
resources.CpusetMems != "" ||
len(resources.Devices) != 0 ||
len(resources.DeviceCgroupRules) != 0 ||
resources.DiskQuota != 0 ||
resources.KernelMemory != 0 ||
resources.MemoryReservation != 0 ||
resources.MemorySwap != 0 ||
resources.MemorySwappiness != nil ||
resources.OomKillDisable != nil ||
resources.PidsLimit != 0 ||
len(resources.Ulimits) != 0 ||
resources.CPUCount != 0 ||
resources.CPUPercent != 0 ||
resources.IOMaximumIOps != 0 ||
resources.IOMaximumBandwidth != 0 {
return fmt.Errorf("resource updating isn't supported on Windows")
}
// update HostConfig of container
if hostConfig.RestartPolicy.Name != "" {
if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container")
}
container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
}
return nil
}
// BuildHostnameFile writes the container's hostname file.
func (container *Container) BuildHostnameFile() error {
return nil
}
// EnableServiceDiscoveryOnDefaultNetwork reports whether service discovery should be enabled on the default network.
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
return true
}
// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
func (container *Container) GetMountPoints() []types.MountPoint {
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
for _, m := range container.MountPoints {
mountPoints = append(mountPoints, types.MountPoint{
Type: m.Type,
Name: m.Name,
Source: m.Path(),
Destination: m.Destination,
Driver: m.Driver,
RW: m.RW,
})
}
return mountPoints
}
func (container *Container) ConfigsDirPath() string {
return filepath.Join(container.Root, "configs")
}
// ConfigFilePath returns the path to the on-disk location of a config.
func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string {
return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID)
}


@ -0,0 +1,43 @@
package container // import "github.com/docker/docker/container"
import (
"strings"
)
// ReplaceOrAppendEnvValues returns the defaults with the overrides either
// replaced by env key or appended to the list
func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
cache := make(map[string]int, len(defaults))
for i, e := range defaults {
parts := strings.SplitN(e, "=", 2)
cache[parts[0]] = i
}
for _, value := range overrides {
// A value without '=' means the caller wants this env var removed/unset.
if !strings.Contains(value, "=") {
if i, exists := cache[value]; exists {
defaults[i] = "" // Used to indicate it should be removed
}
continue
}
// Just do a normal set/update
parts := strings.SplitN(value, "=", 2)
if i, exists := cache[parts[0]]; exists {
defaults[i] = value
} else {
defaults = append(defaults, value)
}
}
// Now remove all entries that we want to "unset"
for i := 0; i < len(defaults); i++ {
if defaults[i] == "" {
defaults = append(defaults[:i], defaults[i+1:]...)
i--
}
}
return defaults
}


@ -0,0 +1,24 @@
package container // import "github.com/docker/docker/container"
import "testing"
func TestReplaceAndAppendEnvVars(t *testing.T) {
var (
d = []string{"HOME=/", "FOO=foo_default"}
// remove FOO from env
// remove BAR from env (nop)
o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"}
)
env := ReplaceOrAppendEnvValues(d, o)
t.Logf("default=%v, override=%v, result=%v", d, o, env)
if len(env) != 2 {
t.Fatalf("expected len of 2 got %d", len(env))
}
if env[0] != "HOME=/root" {
t.Fatalf("expected HOME=/root got '%s'", env[0])
}
if env[1] != "TERM=xterm" {
t.Fatalf("expected TERM=xterm got '%s'", env[1])
}
}


@ -0,0 +1,82 @@
package container // import "github.com/docker/docker/container"
import (
"sync"
"github.com/docker/docker/api/types"
"github.com/sirupsen/logrus"
)
// Health holds the current container health-check state
type Health struct {
types.Health
stop chan struct{} // Write struct{} to stop the monitor
mu sync.Mutex
}
// String returns a human-readable description of the health-check state
func (s *Health) String() string {
status := s.Status()
switch status {
case types.Starting:
return "health: starting"
default: // Healthy and Unhealthy are clear on their own
return s.Health.Status
}
}
// Status returns the current health status.
//
// Note that this takes a lock and the value may change after being read.
func (s *Health) Status() string {
s.mu.Lock()
defer s.mu.Unlock()
// This happens when the monitor has yet to be set up.
if s.Health.Status == "" {
return types.Unhealthy
}
return s.Health.Status
}
// SetStatus writes the current status to the underlying health structure,
// obeying the locking semantics.
//
// Status may be set directly if another lock is used.
func (s *Health) SetStatus(new string) {
s.mu.Lock()
defer s.mu.Unlock()
s.Health.Status = new
}
// OpenMonitorChannel creates and returns a new monitor channel. If there
// already is one, it returns nil.
func (s *Health) OpenMonitorChannel() chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
if s.stop == nil {
logrus.Debug("OpenMonitorChannel")
s.stop = make(chan struct{})
return s.stop
}
return nil
}
// CloseMonitorChannel closes any existing monitor channel.
func (s *Health) CloseMonitorChannel() {
s.mu.Lock()
defer s.mu.Unlock()
if s.stop != nil {
logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
close(s.stop)
s.stop = nil
// unhealthy when the monitor has stopped for compatibility reasons
s.Health.Status = types.Unhealthy
logrus.Debug("CloseMonitorChannel done")
}
}
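// Illustrative sketch (not in the original file): the intended monitor lifecycle. Only
// one monitor may own the stop channel at a time; CloseMonitorChannel closes it and
// marks the container unhealthy, which a probe goroutine would observe and exit on.
func monitorLifecycleExample(h *Health) {
	stop := h.OpenMonitorChannel()
	if stop == nil {
		return // another monitor already owns the channel
	}
	h.SetStatus(types.Starting) // a real probe loop would update this as checks complete
	h.CloseMonitorChannel()     // closes stop and sets the status to unhealthy
	<-stop                      // a probe goroutine selecting on stop would return here
}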


@ -0,0 +1,30 @@
package container // import "github.com/docker/docker/container"
import "sort"
// History is a convenience type for storing a list of containers,
// sorted by creation date in descending order.
type History []*Container
// Len returns the number of containers in the history.
func (history *History) Len() int {
return len(*history)
}
// Less compares two containers and returns true if the second one
// was created before the first one.
func (history *History) Less(i, j int) bool {
containers := *history
return containers[j].Created.Before(containers[i].Created)
}
// Swap switches containers i and j positions in the history.
func (history *History) Swap(i, j int) {
containers := *history
containers[i], containers[j] = containers[j], containers[i]
}
// sort orders the history by creation date in descending order.
func (history *History) sort() {
sort.Sort(history)
}
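// Illustrative sketch (not in the original file): because Less above compares creation
// times in reverse, sorting a History places the most recently created container first.
func newestFirstExample(older, newer *Container) History {
	h := History{older, newer}
	h.sort()
	return h // h[0] is now the container with the latest Created time
}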


@ -0,0 +1,95 @@
package container // import "github.com/docker/docker/container"
import (
"sync"
)
// memoryStore implements a Store in memory.
type memoryStore struct {
s map[string]*Container
sync.RWMutex
}
// NewMemoryStore initializes a new memory store.
func NewMemoryStore() Store {
return &memoryStore{
s: make(map[string]*Container),
}
}
// Add appends a new container to the memory store.
// It overwrites any container already stored under the same id.
func (c *memoryStore) Add(id string, cont *Container) {
c.Lock()
c.s[id] = cont
c.Unlock()
}
// Get returns a container from the store by id.
func (c *memoryStore) Get(id string) *Container {
var res *Container
c.RLock()
res = c.s[id]
c.RUnlock()
return res
}
// Delete removes a container from the store by id.
func (c *memoryStore) Delete(id string) {
c.Lock()
delete(c.s, id)
c.Unlock()
}
// List returns a sorted list of containers from the store.
// The containers are ordered by creation date.
func (c *memoryStore) List() []*Container {
containers := History(c.all())
containers.sort()
return containers
}
// Size returns the number of containers in the store.
func (c *memoryStore) Size() int {
c.RLock()
defer c.RUnlock()
return len(c.s)
}
// First returns the first container found in the store by a given filter.
func (c *memoryStore) First(filter StoreFilter) *Container {
for _, cont := range c.all() {
if filter(cont) {
return cont
}
}
return nil
}
// ApplyAll calls the reducer function with every container in the store.
// The reducer is applied to each container concurrently; ApplyAll blocks until all calls return.
// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
func (c *memoryStore) ApplyAll(apply StoreReducer) {
wg := new(sync.WaitGroup)
for _, cont := range c.all() {
wg.Add(1)
go func(container *Container) {
apply(container)
wg.Done()
}(cont)
}
wg.Wait()
}
func (c *memoryStore) all() []*Container {
c.RLock()
containers := make([]*Container, 0, len(c.s))
for _, cont := range c.s {
containers = append(containers, cont)
}
c.RUnlock()
return containers
}
var _ Store = &memoryStore{}


@ -0,0 +1,106 @@
package container // import "github.com/docker/docker/container"
import (
"testing"
"time"
)
func TestNewMemoryStore(t *testing.T) {
s := NewMemoryStore()
m, ok := s.(*memoryStore)
if !ok {
t.Fatalf("store is not a memory store %v", s)
}
if m.s == nil {
t.Fatal("expected store map to not be nil")
}
}
func TestAddContainers(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
if s.Size() != 1 {
t.Fatalf("expected store size 1, got %v", s.Size())
}
}
func TestGetContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
c := s.Get("id")
if c == nil {
t.Fatal("expected container to not be nil")
}
}
func TestDeleteContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Delete("id")
if c := s.Get("id"); c != nil {
t.Fatalf("expected container to be nil after removal, got %v", c)
}
if s.Size() != 0 {
t.Fatalf("expected store size to be 0, got %v", s.Size())
}
}
func TestListContainers(t *testing.T) {
s := NewMemoryStore()
cont := NewBaseContainer("id", "root")
cont.Created = time.Now()
cont2 := NewBaseContainer("id2", "root")
cont2.Created = time.Now().Add(24 * time.Hour)
s.Add("id", cont)
s.Add("id2", cont2)
list := s.List()
if len(list) != 2 {
t.Fatalf("expected list size 2, got %v", len(list))
}
if list[0].ID != "id2" {
t.Fatalf("expected id2, got %v", list[0].ID)
}
}
func TestFirstContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Add("id2", NewBaseContainer("id2", "root"))
first := s.First(func(cont *Container) bool {
return cont.ID == "id2"
})
if first == nil {
t.Fatal("expected container to not be nil")
}
if first.ID != "id2" {
t.Fatalf("expected id2, got %v", first)
}
}
func TestApplyAllContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Add("id2", NewBaseContainer("id2", "root"))
s.ApplyAll(func(cont *Container) {
if cont.ID == "id2" {
cont.ID = "newID"
}
})
cont := s.Get("id2")
if cont == nil {
t.Fatal("expected container to not be nil")
}
if cont.ID != "newID" {
t.Fatalf("expected newID, got %v", cont.ID)
}
}

View file

@ -0,0 +1,46 @@
package container // import "github.com/docker/docker/container"
import (
"time"
"github.com/sirupsen/logrus"
)
const (
loggerCloseTimeout = 10 * time.Second
)
// Reset puts a container into a state where it can be restarted again.
func (container *Container) Reset(lock bool) {
if lock {
container.Lock()
defer container.Unlock()
}
if err := container.CloseStreams(); err != nil {
logrus.Errorf("%s: %s", container.ID, err)
}
// Re-create a brand new stdin pipe once the container has exited
if container.Config.OpenStdin {
container.StreamConfig.NewInputPipes()
}
if container.LogDriver != nil {
if container.LogCopier != nil {
exit := make(chan struct{})
go func() {
container.LogCopier.Wait()
close(exit)
}()
select {
case <-time.After(loggerCloseTimeout):
logrus.Warn("Logger didn't exit in time: logs may be truncated")
case <-exit:
}
}
container.LogDriver.Close()
container.LogCopier = nil
container.LogDriver = nil
}
}

View file

@ -0,0 +1,12 @@
// +build !windows
package container // import "github.com/docker/docker/container"
// Mount contains information for a mount operation.
type Mount struct {
Source string `json:"source"`
Destination string `json:"destination"`
Writable bool `json:"writable"`
Data string `json:"data"`
Propagation string `json:"mountpropagation"`
}

View file

@ -0,0 +1,8 @@
package container // import "github.com/docker/docker/container"
// Mount contains information for a mount operation.
type Mount struct {
Source string `json:"source"`
Destination string `json:"destination"`
Writable bool `json:"writable"`
}

View file

@ -0,0 +1,409 @@
package container // import "github.com/docker/docker/container"
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/go-units"
)
// State holds the current container state, and has methods to get and
// set the state. Container embeds State, which allows all of the
// methods defined on State to be called on a Container.
type State struct {
sync.Mutex
// Note that `Running` and `Paused` are not mutually exclusive:
// When pausing a container (on Linux), the cgroups freezer is used to suspend
// all processes in the container. Freezing the process requires the process to
// be running. As a result, paused containers are both `Running` _and_ `Paused`.
Running bool
Paused bool
Restarting bool
OOMKilled bool
RemovalInProgress bool // No need for this to be persisted on disk.
Dead bool
Pid int
ExitCodeValue int `json:"ExitCode"`
ErrorMsg string `json:"Error"` // contains last known error during container start, stop, or remove
StartedAt time.Time
FinishedAt time.Time
Health *Health
waitStop chan struct{}
waitRemove chan struct{}
}
// StateStatus is used to return container wait results.
// Implements exec.ExitCode interface.
// This type is needed as State includes a sync.Mutex field, which makes
// copying it unsafe.
type StateStatus struct {
exitCode int
err error
}
// ExitCode returns current exitcode for the state.
func (s StateStatus) ExitCode() int {
return s.exitCode
}
// Err returns current error for the state. Returns nil if the container had
// exited on its own.
func (s StateStatus) Err() error {
return s.err
}
// NewState creates a default state object with a fresh channel for state changes.
func NewState() *State {
return &State{
waitStop: make(chan struct{}),
waitRemove: make(chan struct{}),
}
}
// String returns a human-readable description of the state
func (s *State) String() string {
if s.Running {
if s.Paused {
return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
}
if s.Restarting {
return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}
if h := s.Health; h != nil {
return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
}
return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
}
if s.RemovalInProgress {
return "Removal In Progress"
}
if s.Dead {
return "Dead"
}
if s.StartedAt.IsZero() {
return "Created"
}
if s.FinishedAt.IsZero() {
return ""
}
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}
// IsValidHealthString checks if the provided string is a valid container health status or not.
func IsValidHealthString(s string) bool {
return s == types.Starting ||
s == types.Healthy ||
s == types.Unhealthy ||
s == types.NoHealthcheck
}
// StateString returns a single string to describe state
func (s *State) StateString() string {
if s.Running {
if s.Paused {
return "paused"
}
if s.Restarting {
return "restarting"
}
return "running"
}
if s.RemovalInProgress {
return "removing"
}
if s.Dead {
return "dead"
}
if s.StartedAt.IsZero() {
return "created"
}
return "exited"
}
// IsValidStateString checks if the provided string is a valid container state or not.
func IsValidStateString(s string) bool {
if s != "paused" &&
s != "restarting" &&
s != "removing" &&
s != "running" &&
s != "dead" &&
s != "created" &&
s != "exited" {
return false
}
return true
}
// WaitCondition is an enum type for different states to wait for.
type WaitCondition int
// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
WaitConditionNotRunning WaitCondition = iota
WaitConditionNextExit
WaitConditionRemoved
)
// Wait waits until the container is in a certain state indicated by the given
// condition. A context must be used for cancelling the request, controlling
// timeouts, and avoiding goroutine leaks. Wait must be called without holding
// the state lock. Returns a channel from which the caller will receive the
// result. If the container exited on its own, the result's Err() method will
// be nil and its ExitCode() method will return the container's exit code,
// otherwise, the result's Err() method will return an error indicating why the
// wait operation failed.
func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
s.Lock()
defer s.Unlock()
if condition == WaitConditionNotRunning && !s.Running {
// Buffer so we can put it in the channel now.
resultC := make(chan StateStatus, 1)
// Send the current status.
resultC <- StateStatus{
exitCode: s.ExitCode(),
err: s.Err(),
}
return resultC
}
// If we are waiting only for removal, the waitStop channel should
// remain nil and block forever.
var waitStop chan struct{}
if condition < WaitConditionRemoved {
waitStop = s.waitStop
}
// Always wait for removal, just in case the container gets removed
// while it is still in a "created" state, in which case it is never
// actually stopped.
waitRemove := s.waitRemove
resultC := make(chan StateStatus)
go func() {
select {
case <-ctx.Done():
// Context timeout or cancellation.
resultC <- StateStatus{
exitCode: -1,
err: ctx.Err(),
}
return
case <-waitStop:
case <-waitRemove:
}
s.Lock()
result := StateStatus{
exitCode: s.ExitCode(),
err: s.Err(),
}
s.Unlock()
resultC <- result
}()
return resultC
}
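A sketch of a typical caller of Wait, relying on the State embed in Container described above; the 30-second timeout is illustrative and ctr is assumed to come from a store lookup:

package example

import (
	"context"
	"log"
	"time"

	"github.com/docker/docker/container"
)

// waitForStop blocks until ctr stops, is removed, or the timeout expires.
func waitForStop(ctr *container.Container) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// WaitConditionNotRunning returns immediately when the container is
	// already in a non-running state.
	status := <-ctr.Wait(ctx, container.WaitConditionNotRunning)
	if status.Err() != nil {
		// Context timeout or cancellation; ExitCode() is -1 in this case.
		log.Printf("wait failed: %v", status.Err())
		return
	}
	log.Printf("container exited with code %d", status.ExitCode())
}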
// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
func (s *State) IsRunning() bool {
s.Lock()
res := s.Running
s.Unlock()
return res
}
// GetPID returns the process id of the container.
func (s *State) GetPID() int {
s.Lock()
res := s.Pid
s.Unlock()
return res
}
// ExitCode returns the current exit code for the state. Take the lock first
// if the state may be shared.
func (s *State) ExitCode() int {
return s.ExitCodeValue
}
// SetExitCode sets the current exit code for the state. Take the lock first
// if the state may be shared.
func (s *State) SetExitCode(ec int) {
s.ExitCodeValue = ec
}
// SetRunning sets the state of the container to "running".
func (s *State) SetRunning(pid int, initial bool) {
s.ErrorMsg = ""
s.Paused = false
s.Running = true
s.Restarting = false
if initial {
s.Paused = false
}
s.ExitCodeValue = 0
s.Pid = pid
if initial {
s.StartedAt = time.Now().UTC()
}
}
// SetStopped sets the container state to "stopped" without locking.
func (s *State) SetStopped(exitStatus *ExitStatus) {
s.Running = false
s.Paused = false
s.Restarting = false
s.Pid = 0
if exitStatus.ExitedAt.IsZero() {
s.FinishedAt = time.Now().UTC()
} else {
s.FinishedAt = exitStatus.ExitedAt
}
s.ExitCodeValue = exitStatus.ExitCode
s.OOMKilled = exitStatus.OOMKilled
close(s.waitStop) // fire waiters for stop
s.waitStop = make(chan struct{})
}
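SetStopped (like SetRestarting below) does not take the lock itself, so callers are expected to hold it; a minimal sketch of that convention, with a made-up PID:

package example

import (
	"time"

	"github.com/docker/docker/container"
)

// stateLifecycle shows the locking expected around SetRunning and SetStopped:
// the caller takes the State lock, applies the transition, then unlocks.
func stateLifecycle() {
	s := container.NewState()
	s.Lock()
	s.SetRunning(1234, true) // pid 1234 is illustrative
	s.Unlock()
	// Later, when the process exits:
	s.Lock()
	s.SetStopped(&container.ExitStatus{ExitCode: 0, ExitedAt: time.Now().UTC()})
	s.Unlock()
}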
// SetRestarting sets the container state to "restarting" without locking.
// It also sets the container PID to 0.
func (s *State) SetRestarting(exitStatus *ExitStatus) {
// we should consider the container running when it is restarting because of
// all the checks in docker around rm/stop/etc
s.Running = true
s.Restarting = true
s.Paused = false
s.Pid = 0
s.FinishedAt = time.Now().UTC()
s.ExitCodeValue = exitStatus.ExitCode
s.OOMKilled = exitStatus.OOMKilled
close(s.waitStop) // fire waiters for stop
s.waitStop = make(chan struct{})
}
// SetError sets the container's error state. This is useful when we want to
// know, when inspecting the container, the error that occurred while it
// transitioned to another state.
func (s *State) SetError(err error) {
s.ErrorMsg = ""
if err != nil {
s.ErrorMsg = err.Error()
}
}
// IsPaused returns whether the container is paused or not.
func (s *State) IsPaused() bool {
s.Lock()
res := s.Paused
s.Unlock()
return res
}
// IsRestarting returns whether the container is restarting or not.
func (s *State) IsRestarting() bool {
s.Lock()
res := s.Restarting
s.Unlock()
return res
}
// SetRemovalInProgress sets the container state as being removed.
// It returns true if the container was already in that state.
func (s *State) SetRemovalInProgress() bool {
s.Lock()
defer s.Unlock()
if s.RemovalInProgress {
return true
}
s.RemovalInProgress = true
return false
}
// ResetRemovalInProgress sets the RemovalInProgress flag back to false.
func (s *State) ResetRemovalInProgress() {
s.Lock()
s.RemovalInProgress = false
s.Unlock()
}
// IsRemovalInProgress returns whether the RemovalInProgress flag is set.
// Used by Container to check whether a container is being removed.
func (s *State) IsRemovalInProgress() bool {
s.Lock()
res := s.RemovalInProgress
s.Unlock()
return res
}
// SetDead sets the container state to "dead"
func (s *State) SetDead() {
s.Lock()
s.Dead = true
s.Unlock()
}
// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead.
func (s *State) IsDead() bool {
s.Lock()
res := s.Dead
s.Unlock()
return res
}
// SetRemoved assumes this container is already in the "dead" state and
// closes the internal waitRemove channel to unblock callers waiting for a
// container to be removed.
func (s *State) SetRemoved() {
s.SetRemovalError(nil)
}
// SetRemovalError is to be called in case a container remove failed.
// It sets an error and closes the internal waitRemove channel to unblock
// callers waiting for the container to be removed.
func (s *State) SetRemovalError(err error) {
s.SetError(err)
s.Lock()
close(s.waitRemove) // Unblock those waiting on remove.
// Recreate the channel so next ContainerWait will work
s.waitRemove = make(chan struct{})
s.Unlock()
}
// Err returns an error if there is one.
func (s *State) Err() error {
if s.ErrorMsg != "" {
return errors.New(s.ErrorMsg)
}
return nil
}

View file

@ -0,0 +1,192 @@
package container // import "github.com/docker/docker/container"
import (
"context"
"testing"
"time"
"github.com/docker/docker/api/types"
)
func TestIsValidHealthString(t *testing.T) {
contexts := []struct {
Health string
Expected bool
}{
{types.Healthy, true},
{types.Unhealthy, true},
{types.Starting, true},
{types.NoHealthcheck, true},
{"fail", false},
}
for _, c := range contexts {
v := IsValidHealthString(c.Health)
if v != c.Expected {
t.Fatalf("Expected %t, but got %t", c.Expected, v)
}
}
}
func TestStateRunStop(t *testing.T) {
s := NewState()
// Begin another wait with WaitConditionRemoved. It should complete
// within 200 milliseconds.
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
removalWait := s.Wait(ctx, WaitConditionRemoved)
// Full lifecycle two times.
for i := 1; i <= 2; i++ {
// A wait with WaitConditionNotRunning should return
// immediately since the state is now either "created" (on the
// first iteration) or "exited" (on the second iteration). It
// shouldn't take more than 50 milliseconds.
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
// Expect exit code to be i-1 since it should be the exit
// code from the previous loop or 0 for the created state.
if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 {
t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err())
}
// A wait with WaitConditionNextExit should block until the
// container has started and exited. It shouldn't take more
// than 100 milliseconds.
ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
initialWait := s.Wait(ctx, WaitConditionNextExit)
// Set the state to "Running".
s.Lock()
s.SetRunning(i, true)
s.Unlock()
// Assert desired state.
if !s.IsRunning() {
t.Fatal("State not running")
}
if s.Pid != i {
t.Fatalf("Pid %v, expected %v", s.Pid, i)
}
if s.ExitCode() != 0 {
t.Fatalf("ExitCode %v, expected 0", s.ExitCode())
}
// Now that it's running, a wait with WaitConditionNotRunning
// should block until we stop the container. It shouldn't take
// more than 100 milliseconds.
ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
exitWait := s.Wait(ctx, WaitConditionNotRunning)
// Set the state to "Exited".
s.Lock()
s.SetStopped(&ExitStatus{ExitCode: i})
s.Unlock()
// Assert desired state.
if s.IsRunning() {
t.Fatal("State is running")
}
if s.ExitCode() != i {
t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i)
}
if s.Pid != 0 {
t.Fatalf("Pid %v, expected 0", s.Pid)
}
// Receive the initialWait result.
if status := <-initialWait; status.ExitCode() != i {
t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
}
// Receive the exitWait result.
if status := <-exitWait; status.ExitCode() != i {
t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
}
}
// Set the state to dead and removed.
s.SetDead()
s.SetRemoved()
// Wait for removed status or timeout.
if status := <-removalWait; status.ExitCode() != 2 {
// Should have the final exit code from the loop.
t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err())
}
}
func TestStateTimeoutWait(t *testing.T) {
s := NewState()
s.Lock()
s.SetRunning(0, true)
s.Unlock()
// Start a wait with a timeout.
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
waitC := s.Wait(ctx, WaitConditionNotRunning)
// It should timeout *before* this 200ms timer does.
select {
case <-time.After(200 * time.Millisecond):
t.Fatal("Stop callback doesn't fire in 200 milliseconds")
case status := <-waitC:
t.Log("Stop callback fired")
// Should be a timeout error.
if status.Err() == nil {
t.Fatal("expected timeout error, got nil")
}
if status.ExitCode() != -1 {
t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode())
}
}
s.Lock()
s.SetStopped(&ExitStatus{ExitCode: 0})
s.Unlock()
// Start another wait with a timeout. This one should return
// immediately.
ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
waitC = s.Wait(ctx, WaitConditionNotRunning)
select {
case <-time.After(200 * time.Millisecond):
t.Fatal("Stop callback doesn't fire in 200 milliseconds")
case status := <-waitC:
t.Log("Stop callback fired")
if status.ExitCode() != 0 {
t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err())
}
}
}
func TestIsValidStateString(t *testing.T) {
states := []struct {
state string
expected bool
}{
{"paused", true},
{"restarting", true},
{"running", true},
{"dead", true},
{"start", false},
{"created", true},
{"exited", true},
{"removing", true},
{"stop", false},
}
for _, s := range states {
v := IsValidStateString(s.state)
if v != s.expected {
t.Fatalf("Expected %t, but got %t", s.expected, v)
}
}
}

View file

@ -0,0 +1,28 @@
package container // import "github.com/docker/docker/container"
// StoreFilter defines a function to filter
// containers in the store.
type StoreFilter func(*Container) bool
// StoreReducer defines a function to
// manipulate containers in the store
type StoreReducer func(*Container)
// Store defines an interface that
// any container store must implement.
type Store interface {
// Add appends a new container to the store.
Add(string, *Container)
// Get returns a container from the store by the identifier it was stored with.
Get(string) *Container
// Delete removes a container from the store by the identifier it was stored with.
Delete(string)
// List returns a list of containers from the store.
List() []*Container
// Size returns the number of containers in the store.
Size() int
// First returns the first container found in the store by a given filter.
First(StoreFilter) *Container
// ApplyAll calls the reducer function with every container in the store.
ApplyAll(StoreReducer)
}

View file

@ -0,0 +1,175 @@
package stream // import "github.com/docker/docker/container/stream"
import (
"context"
"io"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q
// AttachConfig is the config struct used to attach a client to a stream's stdio
type AttachConfig struct {
// Tells the attach copier that the stream's stdin is a TTY and to look for
// escape sequences in stdin to detach from the stream.
// When true the escape sequence is not passed to the underlying stream
TTY bool
// Specifies the detach keys the client will be using
// Only useful when `TTY` is true
DetachKeys []byte
// CloseStdin signals that once done, stdin for the attached stream should be closed
// For example, this would close the attached container's stdin.
CloseStdin bool
// UseStd* indicate whether the client has requested to be connected to the
// given stream or not. These flags are used instead of checking Std* != nil
// at points before the client streams Std* are wired up.
UseStdin, UseStdout, UseStderr bool
// CStd* are the streams directly connected to the container
CStdin io.WriteCloser
CStdout, CStderr io.ReadCloser
// Provide client streams to wire up to
Stdin io.ReadCloser
Stdout, Stderr io.Writer
}
// AttachStreams attaches the container's streams to the AttachConfig
func (c *Config) AttachStreams(cfg *AttachConfig) {
if cfg.UseStdin {
cfg.CStdin = c.StdinPipe()
}
if cfg.UseStdout {
cfg.CStdout = c.StdoutPipe()
}
if cfg.UseStderr {
cfg.CStderr = c.StderrPipe()
}
}
// CopyStreams starts goroutines to copy data in and out to/from the container
func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error {
var group errgroup.Group
// Connect stdin of container to the attach stdin stream.
if cfg.Stdin != nil {
group.Go(func() error {
logrus.Debug("attach: stdin: begin")
defer logrus.Debug("attach: stdin: end")
defer func() {
if cfg.CloseStdin && !cfg.TTY {
cfg.CStdin.Close()
} else {
// No matter what, when stdin is closed (io.Copy unblocks), close stdout and stderr
if cfg.CStdout != nil {
cfg.CStdout.Close()
}
if cfg.CStderr != nil {
cfg.CStderr.Close()
}
}
}()
var err error
if cfg.TTY {
_, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys)
} else {
_, err = pools.Copy(cfg.CStdin, cfg.Stdin)
}
if err == io.ErrClosedPipe {
err = nil
}
if err != nil {
logrus.WithError(err).Debug("error on attach stdin")
return errors.Wrap(err, "error on attach stdin")
}
return nil
})
}
attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error {
logrus.Debugf("attach: %s: begin", name)
defer logrus.Debugf("attach: %s: end", name)
defer func() {
// Make sure stdin gets closed
if cfg.Stdin != nil {
cfg.Stdin.Close()
}
streamPipe.Close()
}()
_, err := pools.Copy(stream, streamPipe)
if err == io.ErrClosedPipe {
err = nil
}
if err != nil {
logrus.WithError(err).Debugf("attach: %s", name)
return errors.Wrapf(err, "error attaching %s stream", name)
}
return nil
}
if cfg.Stdout != nil {
group.Go(func() error {
return attachStream("stdout", cfg.Stdout, cfg.CStdout)
})
}
if cfg.Stderr != nil {
group.Go(func() error {
return attachStream("stderr", cfg.Stderr, cfg.CStderr)
})
}
errs := make(chan error, 1)
go func() {
defer logrus.Debug("attach done")
groupErr := make(chan error, 1)
go func() {
groupErr <- group.Wait()
}()
select {
case <-ctx.Done():
// close all pipes
if cfg.CStdin != nil {
cfg.CStdin.Close()
}
if cfg.CStdout != nil {
cfg.CStdout.Close()
}
if cfg.CStderr != nil {
cfg.CStderr.Close()
}
// Now with these closed, wait should return.
if err := group.Wait(); err != nil {
errs <- err
return
}
errs <- ctx.Err()
case err := <-groupErr:
errs <- err
}
}()
return errs
}
func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) {
if len(keys) == 0 {
keys = defaultEscapeSequence
}
pr := term.NewEscapeProxy(src, keys)
defer src.Close()
return pools.Copy(dst, pr)
}
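A rough sketch of how a caller could wire its own stdio to a stream.Config using AttachStreams and CopyStreams (both defined in this file); the Config is assumed to already be connected to a running process:

package example

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/container/stream"
)

// attachToStdio copies the process's stdio to and from the given stream config
// until the context is cancelled or the container-side streams close.
func attachToStdio(ctx context.Context, c *stream.Config) error {
	cfg := &stream.AttachConfig{
		UseStdin:  true,
		UseStdout: true,
		UseStderr: true,
		Stdin:     os.Stdin,
		Stdout:    os.Stdout,
		Stderr:    os.Stderr,
	}
	// AttachStreams fills in CStdin/CStdout/CStderr from the config's pipes.
	c.AttachStreams(cfg)
	// CopyStreams starts the copy goroutines and delivers the first error
	// (or nil) once they have finished or the context is cancelled.
	if err := <-c.CopyStreams(ctx, cfg); err != nil {
		log.Printf("attach ended with error: %v", err)
		return err
	}
	return nil
}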

View file

@ -0,0 +1,146 @@
package stream // import "github.com/docker/docker/container/stream"
import (
"fmt"
"io"
"io/ioutil"
"strings"
"sync"
"github.com/containerd/containerd/cio"
"github.com/docker/docker/pkg/broadcaster"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/sirupsen/logrus"
)
// Config holds information about I/O streams managed together.
//
// Config.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the config's active process.
// Config.StdoutPipe and Config.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".
type Config struct {
sync.WaitGroup
stdout *broadcaster.Unbuffered
stderr *broadcaster.Unbuffered
stdin io.ReadCloser
stdinPipe io.WriteCloser
}
// NewConfig creates a stream config and initializes
// the standard err and standard out to new unbuffered broadcasters.
func NewConfig() *Config {
return &Config{
stderr: new(broadcaster.Unbuffered),
stdout: new(broadcaster.Unbuffered),
}
}
// Stdout returns the standard output in the configuration.
func (c *Config) Stdout() *broadcaster.Unbuffered {
return c.stdout
}
// Stderr returns the standard error in the configuration.
func (c *Config) Stderr() *broadcaster.Unbuffered {
return c.stderr
}
// Stdin returns the standard input in the configuration.
func (c *Config) Stdin() io.ReadCloser {
return c.stdin
}
// StdinPipe returns an input writer pipe as an io.WriteCloser.
func (c *Config) StdinPipe() io.WriteCloser {
return c.stdinPipe
}
// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe.
// It adds this new out pipe to the Stdout broadcaster.
// This will block stdout if unconsumed.
func (c *Config) StdoutPipe() io.ReadCloser {
bytesPipe := ioutils.NewBytesPipe()
c.stdout.Add(bytesPipe)
return bytesPipe
}
// StderrPipe creates a new io.ReadCloser with an empty bytes pipe.
// It adds this new err pipe to the Stderr broadcaster.
// This will block stderr if unconsumed.
func (c *Config) StderrPipe() io.ReadCloser {
bytesPipe := ioutils.NewBytesPipe()
c.stderr.Add(bytesPipe)
return bytesPipe
}
// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe.
func (c *Config) NewInputPipes() {
c.stdin, c.stdinPipe = io.Pipe()
}
// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
func (c *Config) NewNopInputPipe() {
c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
}
// CloseStreams ensures that the configured streams are properly closed.
func (c *Config) CloseStreams() error {
var errors []string
if c.stdin != nil {
if err := c.stdin.Close(); err != nil {
errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
}
}
if err := c.stdout.Clean(); err != nil {
errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
}
if err := c.stderr.Clean(); err != nil {
errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
}
if len(errors) > 0 {
return fmt.Errorf(strings.Join(errors, "\n"))
}
return nil
}
// CopyToPipe connects the stream config to a libcontainerd IO pipe (cio.DirectIO)
func (c *Config) CopyToPipe(iop *cio.DirectIO) {
copyFunc := func(w io.Writer, r io.ReadCloser) {
c.Add(1)
go func() {
if _, err := pools.Copy(w, r); err != nil {
logrus.Errorf("stream copy error: %v", err)
}
r.Close()
c.Done()
}()
}
if iop.Stdout != nil {
copyFunc(c.Stdout(), iop.Stdout)
}
if iop.Stderr != nil {
copyFunc(c.Stderr(), iop.Stderr)
}
if stdin := c.Stdin(); stdin != nil {
if iop.Stdin != nil {
go func() {
pools.Copy(iop.Stdin, stdin)
if err := iop.Stdin.Close(); err != nil {
logrus.Warnf("failed to close stdin: %v", err)
}
}()
}
}
}
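A small sketch of the broadcast behaviour described at the top of this file: each StdoutPipe call registers an extra consumer, and a single write to the stdout broadcaster is delivered to all of them (the literal string is illustrative, and the sketch assumes the broadcaster's Unbuffered type implements io.Writer):

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/container/stream"
)

func main() {
	c := stream.NewConfig()
	// Two independent consumers of the same stdout stream.
	r1 := c.StdoutPipe()
	r2 := c.StdoutPipe()
	// The broadcaster copies this write to every registered pipe.
	if _, err := c.Stdout().Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	buf := make([]byte, 16)
	for _, r := range []io.ReadCloser{r1, r2} {
		n, _ := r.Read(buf)
		fmt.Print(string(buf[:n]))
	}
}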

View file

@ -0,0 +1,494 @@
package container // import "github.com/docker/docker/container"
import (
"errors"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/hashicorp/go-memdb"
"github.com/sirupsen/logrus"
)
const (
memdbContainersTable = "containers"
memdbNamesTable = "names"
memdbIDIndex = "id"
memdbContainerIDIndex = "containerid"
)
var (
// ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved
ErrNameReserved = errors.New("name is reserved")
// ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved
ErrNameNotReserved = errors.New("name is not reserved")
)
// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
// versioned ACID in-memory store.
type Snapshot struct {
types.Container
// additional info queries need to filter on
// preserve nanosec resolution for queries
CreatedAt time.Time
StartedAt time.Time
Name string
Pid int
ExitCode int
Running bool
Paused bool
Managed bool
ExposedPorts nat.PortSet
PortBindings nat.PortSet
Health string
HostConfig struct {
Isolation string
}
}
// nameAssociation associates a container id with a name.
type nameAssociation struct {
// name is the name to associate. Note that name is the primary key
// ("id" in memdb).
name string
containerID string
}
// ViewDB provides an in-memory transactional (ACID) container Store
type ViewDB interface {
Snapshot() View
Save(*Container) error
Delete(*Container) error
ReserveName(name, containerID string) error
ReleaseName(name string) error
}
// View can be used by readers to avoid locking
type View interface {
All() ([]Snapshot, error)
Get(id string) (*Snapshot, error)
GetID(name string) (string, error)
GetAllNames() map[string][]string
}
var schema = &memdb.DBSchema{
Tables: map[string]*memdb.TableSchema{
memdbContainersTable: {
Name: memdbContainersTable,
Indexes: map[string]*memdb.IndexSchema{
memdbIDIndex: {
Name: memdbIDIndex,
Unique: true,
Indexer: &containerByIDIndexer{},
},
},
},
memdbNamesTable: {
Name: memdbNamesTable,
Indexes: map[string]*memdb.IndexSchema{
// Used for names, because "id" is the primary key in memdb.
memdbIDIndex: {
Name: memdbIDIndex,
Unique: true,
Indexer: &namesByNameIndexer{},
},
memdbContainerIDIndex: {
Name: memdbContainerIDIndex,
Indexer: &namesByContainerIDIndexer{},
},
},
},
},
}
type memDB struct {
store *memdb.MemDB
}
// NoSuchContainerError indicates that the container wasn't found in the
// database.
type NoSuchContainerError struct {
id string
}
// Error satisfies the error interface.
func (e NoSuchContainerError) Error() string {
return "no such container " + e.id
}
// NewViewDB provides the default implementation, with the default schema
func NewViewDB() (ViewDB, error) {
store, err := memdb.NewMemDB(schema)
if err != nil {
return nil, err
}
return &memDB{store: store}, nil
}
// Snapshot provides a consistent read-only View of the database
func (db *memDB) Snapshot() View {
return &memdbView{
txn: db.store.Txn(false),
}
}
func (db *memDB) withTxn(cb func(*memdb.Txn) error) error {
txn := db.store.Txn(true)
err := cb(txn)
if err != nil {
txn.Abort()
return err
}
txn.Commit()
return nil
}
// Save atomically updates the in-memory store state for a Container.
// Only read only (deep) copies of containers may be passed in.
func (db *memDB) Save(c *Container) error {
return db.withTxn(func(txn *memdb.Txn) error {
return txn.Insert(memdbContainersTable, c)
})
}
// Delete removes an item by ID
func (db *memDB) Delete(c *Container) error {
return db.withTxn(func(txn *memdb.Txn) error {
view := &memdbView{txn: txn}
names := view.getNames(c.ID)
for _, name := range names {
txn.Delete(memdbNamesTable, nameAssociation{name: name})
}
// Ignore error - the container may not actually exist in the
// db, but we still need to clean up associated names.
txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root))
return nil
})
}
// ReserveName registers a container ID to a name
// ReserveName is idempotent
// Attempting to reserve a name that is already reserved for a different container ID results in ErrNameReserved
// A name reservation is globally unique
func (db *memDB) ReserveName(name, containerID string) error {
return db.withTxn(func(txn *memdb.Txn) error {
s, err := txn.First(memdbNamesTable, memdbIDIndex, name)
if err != nil {
return err
}
if s != nil {
if s.(nameAssociation).containerID != containerID {
return ErrNameReserved
}
return nil
}
return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID})
})
}
// ReleaseName releases the reserved name
// Once released, a name can be reserved again
func (db *memDB) ReleaseName(name string) error {
return db.withTxn(func(txn *memdb.Txn) error {
return txn.Delete(memdbNamesTable, nameAssociation{name: name})
})
}
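A rough sketch of how daemon-side code might use ViewDB and View, mirroring the name-reservation tests later in this change (the name and IDs are made up):

package example

import (
	"fmt"

	"github.com/docker/docker/container"
)

// reserveAndLookup reserves a name, then resolves it through a read-only View.
func reserveAndLookup() error {
	db, err := container.NewViewDB()
	if err != nil {
		return err
	}
	// Reservations are idempotent per container ID; reserving the same name
	// for a different ID returns ErrNameReserved.
	if err := db.ReserveName("web", "containerid1"); err != nil {
		return err
	}
	// A Snapshot is a consistent read-only view; writes made after this call
	// are not visible through it.
	view := db.Snapshot()
	id, err := view.GetID("web")
	if err != nil {
		return err // ErrNameNotReserved if the name was never reserved
	}
	fmt.Println("web is reserved by", id)
	return nil
}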
type memdbView struct {
txn *memdb.Txn
}
// All returns all items in this snapshot. Returned objects must never be modified.
func (v *memdbView) All() ([]Snapshot, error) {
var all []Snapshot
iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex)
if err != nil {
return nil, err
}
for {
item := iter.Next()
if item == nil {
break
}
snapshot := v.transform(item.(*Container))
all = append(all, *snapshot)
}
return all, nil
}
// Get returns an item by id. Returned objects must never be modified.
func (v *memdbView) Get(id string) (*Snapshot, error) {
s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id)
if err != nil {
return nil, err
}
if s == nil {
return nil, NoSuchContainerError{id: id}
}
return v.transform(s.(*Container)), nil
}
// getNames lists all the reserved names for the given container ID.
func (v *memdbView) getNames(containerID string) []string {
iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID)
if err != nil {
return nil
}
var names []string
for {
item := iter.Next()
if item == nil {
break
}
names = append(names, item.(nameAssociation).name)
}
return names
}
// GetID returns the container ID that the passed in name is reserved to.
func (v *memdbView) GetID(name string) (string, error) {
s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name)
if err != nil {
return "", err
}
if s == nil {
return "", ErrNameNotReserved
}
return s.(nameAssociation).containerID, nil
}
// GetAllNames returns all registered names.
func (v *memdbView) GetAllNames() map[string][]string {
iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex)
if err != nil {
return nil
}
out := make(map[string][]string)
for {
item := iter.Next()
if item == nil {
break
}
assoc := item.(nameAssociation)
out[assoc.containerID] = append(out[assoc.containerID], assoc.name)
}
return out
}
// transform maps a (deep) copied Container object to what queries need.
// A lock on the Container is not held because these are immutable deep copies.
func (v *memdbView) transform(container *Container) *Snapshot {
health := types.NoHealthcheck
if container.Health != nil {
health = container.Health.Status()
}
snapshot := &Snapshot{
Container: types.Container{
ID: container.ID,
Names: v.getNames(container.ID),
ImageID: container.ImageID.String(),
Ports: []types.Port{},
Mounts: container.GetMountPoints(),
State: container.State.StateString(),
Status: container.State.String(),
Created: container.Created.Unix(),
},
CreatedAt: container.Created,
StartedAt: container.StartedAt,
Name: container.Name,
Pid: container.Pid,
Managed: container.Managed,
ExposedPorts: make(nat.PortSet),
PortBindings: make(nat.PortSet),
Health: health,
Running: container.Running,
Paused: container.Paused,
ExitCode: container.ExitCode(),
}
if snapshot.Names == nil {
// Dead containers will often have no name, so make sure the response isn't null
snapshot.Names = []string{}
}
if container.HostConfig != nil {
snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
for binding := range container.HostConfig.PortBindings {
snapshot.PortBindings[binding] = struct{}{}
}
}
if container.Config != nil {
snapshot.Image = container.Config.Image
snapshot.Labels = container.Config.Labels
for exposed := range container.Config.ExposedPorts {
snapshot.ExposedPorts[exposed] = struct{}{}
}
}
if len(container.Args) > 0 {
var args []string
for _, arg := range container.Args {
if strings.Contains(arg, " ") {
args = append(args, fmt.Sprintf("'%s'", arg))
} else {
args = append(args, arg)
}
}
argsAsString := strings.Join(args, " ")
snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
} else {
snapshot.Command = container.Path
}
snapshot.Ports = []types.Port{}
networks := make(map[string]*network.EndpointSettings)
if container.NetworkSettings != nil {
for name, netw := range container.NetworkSettings.Networks {
if netw == nil || netw.EndpointSettings == nil {
continue
}
networks[name] = &network.EndpointSettings{
EndpointID: netw.EndpointID,
Gateway: netw.Gateway,
IPAddress: netw.IPAddress,
IPPrefixLen: netw.IPPrefixLen,
IPv6Gateway: netw.IPv6Gateway,
GlobalIPv6Address: netw.GlobalIPv6Address,
GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
MacAddress: netw.MacAddress,
NetworkID: netw.NetworkID,
}
if netw.IPAMConfig != nil {
networks[name].IPAMConfig = &network.EndpointIPAMConfig{
IPv4Address: netw.IPAMConfig.IPv4Address,
IPv6Address: netw.IPAMConfig.IPv6Address,
}
}
}
for port, bindings := range container.NetworkSettings.Ports {
p, err := nat.ParsePort(port.Port())
if err != nil {
logrus.Warnf("invalid port map %+v", err)
continue
}
if len(bindings) == 0 {
snapshot.Ports = append(snapshot.Ports, types.Port{
PrivatePort: uint16(p),
Type: port.Proto(),
})
continue
}
for _, binding := range bindings {
h, err := nat.ParsePort(binding.HostPort)
if err != nil {
logrus.Warnf("invalid host port map %+v", err)
continue
}
snapshot.Ports = append(snapshot.Ports, types.Port{
PrivatePort: uint16(p),
PublicPort: uint16(h),
Type: port.Proto(),
IP: binding.HostIP,
})
}
}
}
snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
return snapshot
}
// containerByIDIndexer is used to extract the ID field from Container types.
// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct.
type containerByIDIndexer struct{}
// FromObject implements the memdb.SingleIndexer interface for Container objects
func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
c, ok := obj.(*Container)
if !ok {
return false, nil, fmt.Errorf("%T is not a Container", obj)
}
// Add the null character as a terminator
v := c.ID + "\x00"
return true, []byte(v), nil
}
// FromArgs implements the memdb.Indexer interface
func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("must provide only a single argument")
}
arg, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
}
// Add the null character as a terminator
arg += "\x00"
return []byte(arg), nil
}
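For illustration, the key built by this indexer is simply the container ID with a trailing NUL; a short sketch (written as if in this package, since the indexer is unexported, with a made-up ID and root):

package container

import "fmt"

// printIndexKey shows the key format produced by containerByIDIndexer, which
// is what FromArgs must reproduce for lookups to match.
func printIndexKey() {
	idx := &containerByIDIndexer{}
	ok, key, err := idx.FromObject(NewBaseContainer("abc123", "/tmp/abc123"))
	if err != nil || !ok {
		panic(err)
	}
	fmt.Printf("%q\n", key) // "abc123\x00"
}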
// namesByNameIndexer is used to index container name associations by name.
type namesByNameIndexer struct{}
func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) {
n, ok := obj.(nameAssociation)
if !ok {
return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
}
// Add the null character as a terminator
return true, []byte(n.name + "\x00"), nil
}
func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("must provide only a single argument")
}
arg, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
}
// Add the null character as a terminator
arg += "\x00"
return []byte(arg), nil
}
// namesByContainerIDIndexer is used to index container names by container ID.
type namesByContainerIDIndexer struct{}
func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
n, ok := obj.(nameAssociation)
if !ok {
return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
}
// Add the null character as a terminator
return true, []byte(n.containerID + "\x00"), nil
}
func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("must provide only a single argument")
}
arg, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
}
// Add the null character as a terminator
arg += "\x00"
return []byte(arg), nil
}

View file

@ -0,0 +1,186 @@
package container // import "github.com/docker/docker/container"
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/pborman/uuid"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
var root string
func TestMain(m *testing.M) {
var err error
root, err = ioutil.TempDir("", "docker-container-test-")
if err != nil {
panic(err)
}
defer os.RemoveAll(root)
os.Exit(m.Run())
}
func newContainer(t *testing.T) *Container {
var (
id = uuid.New()
cRoot = filepath.Join(root, id)
)
if err := os.MkdirAll(cRoot, 0755); err != nil {
t.Fatal(err)
}
c := NewBaseContainer(id, cRoot)
c.HostConfig = &containertypes.HostConfig{}
return c
}
func TestViewSaveDelete(t *testing.T) {
db, err := NewViewDB()
if err != nil {
t.Fatal(err)
}
c := newContainer(t)
if err := c.CheckpointTo(db); err != nil {
t.Fatal(err)
}
if err := db.Delete(c); err != nil {
t.Fatal(err)
}
}
func TestViewAll(t *testing.T) {
var (
db, _ = NewViewDB()
one = newContainer(t)
two = newContainer(t)
)
one.Pid = 10
if err := one.CheckpointTo(db); err != nil {
t.Fatal(err)
}
two.Pid = 20
if err := two.CheckpointTo(db); err != nil {
t.Fatal(err)
}
all, err := db.Snapshot().All()
if err != nil {
t.Fatal(err)
}
if l := len(all); l != 2 {
t.Fatalf("expected 2 items, got %d", l)
}
byID := make(map[string]Snapshot)
for i := range all {
byID[all[i].ID] = all[i]
}
if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
t.Fatalf("expected something different with for id=%s: %v", one.ID, s)
}
if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
t.Fatalf("expected something different with for id=%s: %v", two.ID, s)
}
}
func TestViewGet(t *testing.T) {
var (
db, _ = NewViewDB()
one = newContainer(t)
)
one.ImageID = "some-image-123"
if err := one.CheckpointTo(db); err != nil {
t.Fatal(err)
}
s, err := db.Snapshot().Get(one.ID)
if err != nil {
t.Fatal(err)
}
if s == nil || s.ImageID != "some-image-123" {
t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
}
}
func TestNames(t *testing.T) {
db, err := NewViewDB()
if err != nil {
t.Fatal(err)
}
assert.Check(t, db.ReserveName("name1", "containerid1"))
assert.Check(t, db.ReserveName("name1", "containerid1")) // idempotent
assert.Check(t, db.ReserveName("name2", "containerid2"))
assert.Check(t, is.Error(db.ReserveName("name2", "containerid3"), ErrNameReserved.Error()))
// Releasing a name allows the name to point to something else later.
assert.Check(t, db.ReleaseName("name2"))
assert.Check(t, db.ReserveName("name2", "containerid3"))
view := db.Snapshot()
id, err := view.GetID("name1")
assert.Check(t, err)
assert.Check(t, is.Equal("containerid1", id))
id, err = view.GetID("name2")
assert.Check(t, err)
assert.Check(t, is.Equal("containerid3", id))
_, err = view.GetID("notreserved")
assert.Check(t, is.Error(err, ErrNameNotReserved.Error()))
// Releasing and re-reserving a name doesn't affect the snapshot.
assert.Check(t, db.ReleaseName("name2"))
assert.Check(t, db.ReserveName("name2", "containerid4"))
id, err = view.GetID("name1")
assert.Check(t, err)
assert.Check(t, is.Equal("containerid1", id))
id, err = view.GetID("name2")
assert.Check(t, err)
assert.Check(t, is.Equal("containerid3", id))
// GetAllNames
assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames()))
assert.Check(t, db.ReserveName("name3", "containerid1"))
assert.Check(t, db.ReserveName("name4", "containerid1"))
view = db.Snapshot()
assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames()))
// Release containerid1's names with Delete even though no container exists
assert.Check(t, db.Delete(&Container{ID: "containerid1"}))
// Reusing one of those names should work
assert.Check(t, db.ReserveName("name1", "containerid4"))
view = db.Snapshot()
assert.Check(t, is.DeepEqual(map[string][]string{"containerid4": {"name1", "name2"}}, view.GetAllNames()))
}
// Test case for GitHub issue 35920
func TestViewWithHealthCheck(t *testing.T) {
var (
db, _ = NewViewDB()
one = newContainer(t)
)
one.Health = &Health{
Health: types.Health{
Status: "starting",
},
}
if err := one.CheckpointTo(db); err != nil {
t.Fatal(err)
}
s, err := db.Snapshot().Get(one.ID)
if err != nil {
t.Fatal(err)
}
if s == nil || s.Health != "starting" {
t.Fatalf("expected Health=starting. Got: %+v", s)
}
}