Revert "update to use containerd seccomp package"
This reverts commit 4f8e065faf055d3f0463a92622297ca3afac07f4.
parent 09243b740c
commit 60f032f6f5

8199 changed files with 1598219 additions and 30742 deletions
vendor/github.com/docker/docker-ce/components/engine/container/archive.go (new file, 86 lines; generated, vendored)
@@ -0,0 +1,86 @@
package container // import "github.com/docker/docker/container"

import (
    "os"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/system"
    "github.com/pkg/errors"
)

// ResolvePath resolves the given path in the container to a resource on the
// host. Returns a resolved path (absolute path to the resource on the host),
// the absolute path to the resource relative to the container's rootfs, and
// an error if the path points to outside the container's rootfs.
func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
    if container.BaseFS == nil {
        return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil")
    }
    // Check if a drive letter was supplied; it must be the system drive. No-op except on Windows.
    path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
    if err != nil {
        return "", "", err
    }

    // Consider the given path as an absolute path in the container.
    absPath = archive.PreserveTrailingDotOrSeparator(
        container.BaseFS.Join(string(container.BaseFS.Separator()), path),
        path,
        container.BaseFS.Separator())

    // Split the absPath into its Directory and Base components. We will
    // resolve the dir in the scope of the container then append the base.
    dirPath, basePath := container.BaseFS.Split(absPath)

    resolvedDirPath, err := container.GetResourcePath(dirPath)
    if err != nil {
        return "", "", err
    }

    // resolvedDirPath will have been cleaned (no trailing path separators) so
    // we can manually join it with the base path element.
    resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
    return resolvedPath, absPath, nil
}

// StatPath stats the resource at the given resolved path. Locks and mounts should
// be acquired before calling this method and the given path should be fully
// resolved to a path on the host corresponding to the given absolute path
// inside the container.
func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
    if container.BaseFS == nil {
        return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil")
    }
    driver := container.BaseFS

    lstat, err := driver.Lstat(resolvedPath)
    if err != nil {
        return nil, err
    }

    var linkTarget string
    if lstat.Mode()&os.ModeSymlink != 0 {
        // Fully evaluate the symlink in the scope of the container rootfs.
        hostPath, err := container.GetResourcePath(absPath)
        if err != nil {
            return nil, err
        }

        linkTarget, err = driver.Rel(driver.Path(), hostPath)
        if err != nil {
            return nil, err
        }

        // Make it an absolute path.
        linkTarget = driver.Join(string(driver.Separator()), linkTarget)
    }

    return &types.ContainerPathStat{
        Name:       driver.Base(absPath),
        Size:       lstat.Size(),
        Mode:       lstat.Mode(),
        Mtime:      lstat.ModTime(),
        LinkTarget: linkTarget,
    }, nil
}
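The split-then-resolve step is the interesting part of ResolvePath: only the parent directory is resolved in the container's scope, so a symlink in the final path element survives to be Lstat'd rather than followed. A minimal standalone sketch of the same idea using only the standard library; note that filepath.EvalSymlinks does not confine resolution to the rootfs the way the scoped lookup behind GetResourcePath does, and the paths below are hypothetical.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    rootfs := "/tmp/example-rootfs" // hypothetical scope directory
    requested := "/etc/some-link"   // the final element may be a symlink

    // Resolve only the directory part...
    dir, base := filepath.Split(requested)
    resolvedDir, err := filepath.EvalSymlinks(filepath.Join(rootfs, dir))
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }

    // ...then re-append the base, so Lstat sees the link itself.
    resolved := filepath.Join(resolvedDir, base)
    if fi, err := os.Lstat(resolved); err == nil {
        fmt.Println(fi.Name(), fi.Mode()&os.ModeSymlink != 0)
    }
}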
vendor/github.com/docker/docker-ce/components/engine/container/container.go (new file, 1094 lines; generated, vendored)
File diff suppressed because it is too large
vendor/github.com/docker/docker-ce/components/engine/container/container_linux.go (new file, 9 lines; generated, vendored)
@@ -0,0 +1,9 @@
package container // import "github.com/docker/docker/container"

import (
    "golang.org/x/sys/unix"
)

func detachMounted(path string) error {
    return unix.Unmount(path, unix.MNT_DETACH)
}
vendor/github.com/docker/docker-ce/components/engine/container/container_notlinux.go (new file, 23 lines; generated, vendored)
@@ -0,0 +1,23 @@
// +build freebsd

package container // import "github.com/docker/docker/container"

import (
    "golang.org/x/sys/unix"
)

func detachMounted(path string) error {
    // FreeBSD does not support the lazy unmount or MNT_DETACH feature.
    // Therefore there are separate definitions for this.
    return unix.Unmount(path, 0)
}

// SecretMounts returns the mounts for the secret path
func (container *Container) SecretMounts() []Mount {
    return nil
}

// UnmountSecrets unmounts the fs for secrets
func (container *Container) UnmountSecrets() error {
    return nil
}
vendor/github.com/docker/docker-ce/components/engine/container/container_unit_test.go (new file, 126 lines; generated, vendored)
@@ -0,0 +1,126 @@
package container // import "github.com/docker/docker/container"

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "github.com/docker/docker/api/types/container"
    swarmtypes "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/daemon/logger/jsonfilelog"
    "github.com/docker/docker/pkg/signal"
    "github.com/gotestyourself/gotestyourself/assert"
)

func TestContainerStopSignal(t *testing.T) {
    c := &Container{
        Config: &container.Config{},
    }

    def, err := signal.ParseSignal(signal.DefaultStopSignal)
    if err != nil {
        t.Fatal(err)
    }

    s := c.StopSignal()
    if s != int(def) {
        t.Fatalf("Expected %v, got %v", def, s)
    }

    c = &Container{
        Config: &container.Config{StopSignal: "SIGKILL"},
    }
    s = c.StopSignal()
    if s != 9 {
        t.Fatalf("Expected 9, got %v", s)
    }
}

func TestContainerStopTimeout(t *testing.T) {
    c := &Container{
        Config: &container.Config{},
    }

    s := c.StopTimeout()
    if s != DefaultStopTimeout {
        t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
    }

    stopTimeout := 15
    c = &Container{
        Config: &container.Config{StopTimeout: &stopTimeout},
    }
    s = c.StopTimeout()
    if s != 15 {
        t.Fatalf("Expected 15, got %v", s)
    }
}

func TestContainerSecretReferenceDestTarget(t *testing.T) {
    ref := &swarmtypes.SecretReference{
        File: &swarmtypes.SecretReferenceFileTarget{
            Name: "app",
        },
    }

    d := getSecretTargetPath(ref)
    expected := filepath.Join(containerSecretMountPath, "app")
    if d != expected {
        t.Fatalf("expected secret dest %q; received %q", expected, d)
    }
}

func TestContainerLogPathSetForJSONFileLogger(t *testing.T) {
    containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForJSONFileLogger")
    assert.NilError(t, err)
    defer os.RemoveAll(containerRoot)

    c := &Container{
        Config: &container.Config{},
        HostConfig: &container.HostConfig{
            LogConfig: container.LogConfig{
                Type: jsonfilelog.Name,
            },
        },
        ID:   "TestContainerLogPathSetForJSONFileLogger",
        Root: containerRoot,
    }

    logger, err := c.StartLogger()
    assert.NilError(t, err)
    defer logger.Close()

    expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID)))
    assert.NilError(t, err)
    assert.Equal(t, c.LogPath, expectedLogPath)
}

func TestContainerLogPathSetForRingLogger(t *testing.T) {
    containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForRingLogger")
    assert.NilError(t, err)
    defer os.RemoveAll(containerRoot)

    c := &Container{
        Config: &container.Config{},
        HostConfig: &container.HostConfig{
            LogConfig: container.LogConfig{
                Type: jsonfilelog.Name,
                Config: map[string]string{
                    "mode": string(container.LogModeNonBlock),
                },
            },
        },
        ID:   "TestContainerLogPathSetForRingLogger",
        Root: containerRoot,
    }

    logger, err := c.StartLogger()
    assert.NilError(t, err)
    defer logger.Close()

    expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID)))
    assert.NilError(t, err)
    assert.Equal(t, c.LogPath, expectedLogPath)
}
vendor/github.com/docker/docker-ce/components/engine/container/container_unix.go (new file, 461 lines; generated, vendored)
@@ -0,0 +1,461 @@
// +build linux freebsd

package container // import "github.com/docker/docker/container"

import (
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/containerd/continuity/fs"
    "github.com/docker/docker/api/types"
    containertypes "github.com/docker/docker/api/types/container"
    mounttypes "github.com/docker/docker/api/types/mount"
    swarmtypes "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/pkg/mount"
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/volume"
    "github.com/opencontainers/selinux/go-selinux/label"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
)

const (
    // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container.
    DefaultStopTimeout = 10

    containerSecretMountPath = "/run/secrets"
)

// TrySetNetworkMount attempts to set the network mounts given a provided destination and
// the path to use for it; return true if the given destination was a network mount file
func (container *Container) TrySetNetworkMount(destination string, path string) bool {
    if destination == "/etc/resolv.conf" {
        container.ResolvConfPath = path
        return true
    }
    if destination == "/etc/hostname" {
        container.HostnamePath = path
        return true
    }
    if destination == "/etc/hosts" {
        container.HostsPath = path
        return true
    }

    return false
}

// BuildHostnameFile writes the container's hostname file.
func (container *Container) BuildHostnameFile() error {
    hostnamePath, err := container.GetRootResourcePath("hostname")
    if err != nil {
        return err
    }
    container.HostnamePath = hostnamePath
    return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
}

// NetworkMounts returns the list of network mounts.
func (container *Container) NetworkMounts() []Mount {
    var mounts []Mount
    shared := container.HostConfig.NetworkMode.IsContainer()
    parser := volume.NewParser(container.OS)
    if container.ResolvConfPath != "" {
        if _, err := os.Stat(container.ResolvConfPath); err != nil {
            logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
        } else {
            writable := !container.HostConfig.ReadonlyRootfs
            if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
                writable = m.RW
            } else {
                label.Relabel(container.ResolvConfPath, container.MountLabel, shared)
            }
            mounts = append(mounts, Mount{
                Source:      container.ResolvConfPath,
                Destination: "/etc/resolv.conf",
                Writable:    writable,
                Propagation: string(parser.DefaultPropagationMode()),
            })
        }
    }
    if container.HostnamePath != "" {
        if _, err := os.Stat(container.HostnamePath); err != nil {
            logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
        } else {
            writable := !container.HostConfig.ReadonlyRootfs
            if m, exists := container.MountPoints["/etc/hostname"]; exists {
                writable = m.RW
            } else {
                label.Relabel(container.HostnamePath, container.MountLabel, shared)
            }
            mounts = append(mounts, Mount{
                Source:      container.HostnamePath,
                Destination: "/etc/hostname",
                Writable:    writable,
                Propagation: string(parser.DefaultPropagationMode()),
            })
        }
    }
    if container.HostsPath != "" {
        if _, err := os.Stat(container.HostsPath); err != nil {
            logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
        } else {
            writable := !container.HostConfig.ReadonlyRootfs
            if m, exists := container.MountPoints["/etc/hosts"]; exists {
                writable = m.RW
            } else {
                label.Relabel(container.HostsPath, container.MountLabel, shared)
            }
            mounts = append(mounts, Mount{
                Source:      container.HostsPath,
                Destination: "/etc/hosts",
                Writable:    writable,
                Propagation: string(parser.DefaultPropagationMode()),
            })
        }
    }
    return mounts
}

// CopyImagePathContent copies files in destination to the volume.
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
    rootfs, err := container.GetResourcePath(destination)
    if err != nil {
        return err
    }

    if _, err = ioutil.ReadDir(rootfs); err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }

    id := stringid.GenerateNonCryptoID()
    path, err := v.Mount(id)
    if err != nil {
        return err
    }

    defer func() {
        if err := v.Unmount(id); err != nil {
            logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
        }
    }()
    if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP {
        return err
    }
    return copyExistingContents(rootfs, path)
}

// ShmResourcePath returns path to shm
func (container *Container) ShmResourcePath() (string, error) {
    return container.MountsResourcePath("shm")
}

// HasMountFor checks if path is a mountpoint
func (container *Container) HasMountFor(path string) bool {
    _, exists := container.MountPoints[path]
    if exists {
        return true
    }

    // Also search among the tmpfs mounts
    for dest := range container.HostConfig.Tmpfs {
        if dest == path {
            return true
        }
    }

    return false
}

// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted
func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
    if container.HasMountFor("/dev/shm") {
        return nil
    }

    // container.ShmPath should not be used here as it may point
    // to the host's or other container's /dev/shm
    shmPath, err := container.ShmResourcePath()
    if err != nil {
        return err
    }
    if shmPath == "" {
        return nil
    }
    if err = unmount(shmPath); err != nil && !os.IsNotExist(err) {
        if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil {
            return errors.Wrapf(err, "umount %s", shmPath)
        }
    }
    return nil
}

// IpcMounts returns the list of IPC mounts
func (container *Container) IpcMounts() []Mount {
    var mounts []Mount
    parser := volume.NewParser(container.OS)

    if container.HasMountFor("/dev/shm") {
        return mounts
    }
    if container.ShmPath == "" {
        return mounts
    }

    label.SetFileLabel(container.ShmPath, container.MountLabel)
    mounts = append(mounts, Mount{
        Source:      container.ShmPath,
        Destination: "/dev/shm",
        Writable:    true,
        Propagation: string(parser.DefaultPropagationMode()),
    })

    return mounts
}

// SecretMounts returns the mounts for the secret path.
func (container *Container) SecretMounts() ([]Mount, error) {
    var mounts []Mount
    for _, r := range container.SecretReferences {
        if r.File == nil {
            continue
        }
        src, err := container.SecretFilePath(*r)
        if err != nil {
            return nil, err
        }
        mounts = append(mounts, Mount{
            Source:      src,
            Destination: getSecretTargetPath(r),
            Writable:    false,
        })
    }
    for _, r := range container.ConfigReferences {
        fPath, err := container.ConfigFilePath(*r)
        if err != nil {
            return nil, err
        }
        mounts = append(mounts, Mount{
            Source:      fPath,
            Destination: r.File.Name,
            Writable:    false,
        })
    }

    return mounts, nil
}

// UnmountSecrets unmounts the local tmpfs for secrets
func (container *Container) UnmountSecrets() error {
    p, err := container.SecretMountPath()
    if err != nil {
        return err
    }
    if _, err := os.Stat(p); err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }

    return mount.RecursiveUnmount(p)
}

type conflictingUpdateOptions string

func (e conflictingUpdateOptions) Error() string {
    return string(e)
}

func (e conflictingUpdateOptions) Conflict() {}

// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
    // update resources of container
    resources := hostConfig.Resources
    cResources := &container.HostConfig.Resources

    // validate NanoCPUs, CPUPeriod, and CPUQuota
    // Because NanoCPU effectively updates CPUPeriod/CPUQuota,
    // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa.
    // In the following we make sure the intended update (resources) does not conflict with the existing (cResources).
    if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {
        return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set")
    }
    if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 {
        return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set")
    }
    if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 {
        return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set")
    }
    if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 {
        return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set")
    }

    if resources.BlkioWeight != 0 {
        cResources.BlkioWeight = resources.BlkioWeight
    }
    if resources.CPUShares != 0 {
        cResources.CPUShares = resources.CPUShares
    }
    if resources.NanoCPUs != 0 {
        cResources.NanoCPUs = resources.NanoCPUs
    }
    if resources.CPUPeriod != 0 {
        cResources.CPUPeriod = resources.CPUPeriod
    }
    if resources.CPUQuota != 0 {
        cResources.CPUQuota = resources.CPUQuota
    }
    if resources.CpusetCpus != "" {
        cResources.CpusetCpus = resources.CpusetCpus
    }
    if resources.CpusetMems != "" {
        cResources.CpusetMems = resources.CpusetMems
    }
    if resources.Memory != 0 {
        // If the new memory limit exceeds the already-set memoryswap limit and
        // the update does not raise the memoryswap limit as well, error out.
        if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
            return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
        }
        cResources.Memory = resources.Memory
    }
    if resources.MemorySwap != 0 {
        cResources.MemorySwap = resources.MemorySwap
    }
    if resources.MemoryReservation != 0 {
        cResources.MemoryReservation = resources.MemoryReservation
    }
    if resources.KernelMemory != 0 {
        cResources.KernelMemory = resources.KernelMemory
    }
    if resources.CPURealtimePeriod != 0 {
        cResources.CPURealtimePeriod = resources.CPURealtimePeriod
    }
    if resources.CPURealtimeRuntime != 0 {
        cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
    }

    // update HostConfig of container
    if hostConfig.RestartPolicy.Name != "" {
        if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
            return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container")
        }
        container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
    }

    return nil
}

// DetachAndUnmount uses a detached mount on all mount destinations, then
// unmounts each volume normally.
// This is used from daemon/archive for `docker cp`
func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
    networkMounts := container.NetworkMounts()
    mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))

    for _, mntPoint := range container.MountPoints {
        dest, err := container.GetResourcePath(mntPoint.Destination)
        if err != nil {
            logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
            continue
        }
        mountPaths = append(mountPaths, dest)
    }

    for _, m := range networkMounts {
        dest, err := container.GetResourcePath(m.Destination)
        if err != nil {
            logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
            continue
        }
        mountPaths = append(mountPaths, dest)
    }

    for _, mountPath := range mountPaths {
        if err := detachMounted(mountPath); err != nil {
            logrus.Warnf("%s unmountVolumes: Failed to do lazy umount of volume '%s': %v", container.ID, mountPath, err)
        }
    }
    return container.UnmountVolumes(volumeEventLog)
}

// copyExistingContents copies from the source to the destination and
// ensures the ownership is appropriately set.
func copyExistingContents(source, destination string) error {
    dstList, err := ioutil.ReadDir(destination)
    if err != nil {
        return err
    }
    if len(dstList) != 0 {
        // destination is not empty, do not copy
        return nil
    }
    return fs.CopyDir(destination, source)
}

// TmpfsMounts returns the list of tmpfs mounts
func (container *Container) TmpfsMounts() ([]Mount, error) {
    parser := volume.NewParser(container.OS)
    var mounts []Mount
    for dest, data := range container.HostConfig.Tmpfs {
        mounts = append(mounts, Mount{
            Source:      "tmpfs",
            Destination: dest,
            Data:        data,
        })
    }
    for dest, mnt := range container.MountPoints {
        if mnt.Type == mounttypes.TypeTmpfs {
            data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
            if err != nil {
                return nil, err
            }
            mounts = append(mounts, Mount{
                Source:      "tmpfs",
                Destination: dest,
                Data:        data,
            })
        }
    }
    return mounts, nil
}

// EnableServiceDiscoveryOnDefaultNetwork indicates whether service discovery is enabled on the default network
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
    return false
}

// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
func (container *Container) GetMountPoints() []types.MountPoint {
    mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
    for _, m := range container.MountPoints {
        mountPoints = append(mountPoints, types.MountPoint{
            Type:        m.Type,
            Name:        m.Name,
            Source:      m.Path(),
            Destination: m.Destination,
            Driver:      m.Driver,
            Mode:        m.Mode,
            RW:          m.RW,
            Propagation: m.Propagation,
        })
    }
    return mountPoints
}

// ConfigFilePath returns the path to the on-disk location of a config.
// On unix, configs are always considered secret
func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) {
    mounts, err := container.SecretMountPath()
    if err != nil {
        return "", err
    }
    return filepath.Join(mounts, configRef.ConfigID), nil
}
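Why NanoCPUs conflicts with CPUPeriod/CPUQuota in UpdateContainer above: the daemon derives a CFS quota from NanoCPUs, so accepting both in one update would make the effective quota ambiguous. A rough, self-contained illustration of that derivation; the 100000µs period is the conventional CFS default, and the exact conversion is stated as an assumption rather than a quote of the daemon's code.

package main

import "fmt"

func main() {
    nanoCPUs := int64(1500000000) // --cpus=1.5 expressed in billionths of a CPU
    period := int64(100000)       // assumed default CFS period, in microseconds

    // The quota this implies: 1.5 CPUs worth of runtime per period,
    // which is why a separate CPUQuota/CPUPeriod update must be rejected.
    quota := nanoCPUs * period / 1e9
    fmt.Println(quota) // 150000
}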
vendor/github.com/docker/docker-ce/components/engine/container/container_windows.go (new file, 213 lines; generated, vendored)
@@ -0,0 +1,213 @@
package container // import "github.com/docker/docker/container"

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/docker/docker/api/types"
    containertypes "github.com/docker/docker/api/types/container"
    swarmtypes "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/pkg/system"
)

const (
    containerSecretMountPath         = `C:\ProgramData\Docker\secrets`
    containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets`
    containerInternalConfigsDirPath  = `C:\ProgramData\Docker\internal\configs`

    // DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container
    DefaultStopTimeout = 30
)

// UnmountIpcMount unmounts Ipc related mounts.
// This is a NOOP on windows.
func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
    return nil
}

// IpcMounts returns the list of Ipc related mounts.
func (container *Container) IpcMounts() []Mount {
    return nil
}

// CreateSecretSymlinks creates symlinks to files in the secret mount.
func (container *Container) CreateSecretSymlinks() error {
    for _, r := range container.SecretReferences {
        if r.File == nil {
            continue
        }
        resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r))
        if err != nil {
            return err
        }
        if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil {
            return err
        }
        if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil {
            return err
        }
    }

    return nil
}

// SecretMounts returns the mount for the secret path.
// All secrets are stored in a single mount on Windows. Target symlinks are
// created for each secret, pointing to the files in this mount.
func (container *Container) SecretMounts() ([]Mount, error) {
    var mounts []Mount
    if len(container.SecretReferences) > 0 {
        src, err := container.SecretMountPath()
        if err != nil {
            return nil, err
        }
        mounts = append(mounts, Mount{
            Source:      src,
            Destination: containerInternalSecretMountPath,
            Writable:    false,
        })
    }

    return mounts, nil
}

// UnmountSecrets unmounts the fs for secrets
func (container *Container) UnmountSecrets() error {
    p, err := container.SecretMountPath()
    if err != nil {
        return err
    }
    return os.RemoveAll(p)
}

// CreateConfigSymlinks creates symlinks to files in the config mount.
func (container *Container) CreateConfigSymlinks() error {
    for _, configRef := range container.ConfigReferences {
        if configRef.File == nil {
            continue
        }
        resolvedPath, _, err := container.ResolvePath(configRef.File.Name)
        if err != nil {
            return err
        }
        if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil {
            return err
        }
        if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil {
            return err
        }
    }

    return nil
}

// ConfigMounts returns the mount for configs.
// TODO: Right now Windows doesn't really have a "secure" storage for secrets,
// however some configs may contain secrets. Once secure storage is worked out,
// configs and secret handling should be merged.
func (container *Container) ConfigMounts() []Mount {
    var mounts []Mount
    if len(container.ConfigReferences) > 0 {
        mounts = append(mounts, Mount{
            Source:      container.ConfigsDirPath(),
            Destination: containerInternalConfigsDirPath,
            Writable:    false,
        })
    }

    return mounts
}

// DetachAndUnmount unmounts all volumes.
// On Windows it only delegates to `UnmountVolumes` since there is nothing to
// force unmount.
func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
    return container.UnmountVolumes(volumeEventLog)
}

// TmpfsMounts returns the list of tmpfs mounts
func (container *Container) TmpfsMounts() ([]Mount, error) {
    var mounts []Mount
    return mounts, nil
}

// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
    resources := hostConfig.Resources
    if resources.CPUShares != 0 ||
        resources.Memory != 0 ||
        resources.NanoCPUs != 0 ||
        resources.CgroupParent != "" ||
        resources.BlkioWeight != 0 ||
        len(resources.BlkioWeightDevice) != 0 ||
        len(resources.BlkioDeviceReadBps) != 0 ||
        len(resources.BlkioDeviceWriteBps) != 0 ||
        len(resources.BlkioDeviceReadIOps) != 0 ||
        len(resources.BlkioDeviceWriteIOps) != 0 ||
        resources.CPUPeriod != 0 ||
        resources.CPUQuota != 0 ||
        resources.CPURealtimePeriod != 0 ||
        resources.CPURealtimeRuntime != 0 ||
        resources.CpusetCpus != "" ||
        resources.CpusetMems != "" ||
        len(resources.Devices) != 0 ||
        len(resources.DeviceCgroupRules) != 0 ||
        resources.DiskQuota != 0 ||
        resources.KernelMemory != 0 ||
        resources.MemoryReservation != 0 ||
        resources.MemorySwap != 0 ||
        resources.MemorySwappiness != nil ||
        resources.OomKillDisable != nil ||
        resources.PidsLimit != 0 ||
        len(resources.Ulimits) != 0 ||
        resources.CPUCount != 0 ||
        resources.CPUPercent != 0 ||
        resources.IOMaximumIOps != 0 ||
        resources.IOMaximumBandwidth != 0 {
        return fmt.Errorf("resource updating isn't supported on Windows")
    }
    // update HostConfig of container
    if hostConfig.RestartPolicy.Name != "" {
        if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
            return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container")
        }
        container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
    }
    return nil
}

// BuildHostnameFile writes the container's hostname file.
func (container *Container) BuildHostnameFile() error {
    return nil
}

// EnableServiceDiscoveryOnDefaultNetwork indicates whether service discovery is enabled on the default network
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
    return true
}

// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
func (container *Container) GetMountPoints() []types.MountPoint {
    mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
    for _, m := range container.MountPoints {
        mountPoints = append(mountPoints, types.MountPoint{
            Type:        m.Type,
            Name:        m.Name,
            Source:      m.Path(),
            Destination: m.Destination,
            Driver:      m.Driver,
            RW:          m.RW,
        })
    }
    return mountPoints
}

// ConfigsDirPath returns the path to the container's on-disk configs directory.
func (container *Container) ConfigsDirPath() string {
    return filepath.Join(container.Root, "configs")
}

// ConfigFilePath returns the path to the on-disk location of a config.
func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string {
    return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID)
}
vendor/github.com/docker/docker-ce/components/engine/container/env.go (new file, 43 lines; generated, vendored)
@@ -0,0 +1,43 @@
package container // import "github.com/docker/docker/container"

import (
    "strings"
)

// ReplaceOrAppendEnvValues returns the defaults with the overrides either
// replaced by env key or appended to the list
func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
    cache := make(map[string]int, len(defaults))
    for i, e := range defaults {
        parts := strings.SplitN(e, "=", 2)
        cache[parts[0]] = i
    }

    for _, value := range overrides {
        // A value without "=" means the caller wants this env var removed/unset.
        if !strings.Contains(value, "=") {
            if i, exists := cache[value]; exists {
                defaults[i] = "" // Used to indicate it should be removed
            }
            continue
        }

        // Just do a normal set/update
        parts := strings.SplitN(value, "=", 2)
        if i, exists := cache[parts[0]]; exists {
            defaults[i] = value
        } else {
            defaults = append(defaults, value)
        }
    }

    // Now remove all entries that we want to "unset"
    for i := 0; i < len(defaults); i++ {
        if defaults[i] == "" {
            defaults = append(defaults[:i], defaults[i+1:]...)
            i--
        }
    }

    return defaults
}
vendor/github.com/docker/docker-ce/components/engine/container/env_test.go (new file, 24 lines; generated, vendored)
@@ -0,0 +1,24 @@
package container // import "github.com/docker/docker/container"

import "testing"

func TestReplaceAndAppendEnvVars(t *testing.T) {
    var (
        d = []string{"HOME=/", "FOO=foo_default"}
        // remove FOO from env
        // remove BAR from env (nop)
        o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"}
    )

    env := ReplaceOrAppendEnvValues(d, o)
    t.Logf("default=%v, override=%v, result=%v", d, o, env)
    if len(env) != 2 {
        t.Fatalf("expected len of 2 got %d", len(env))
    }
    if env[0] != "HOME=/root" {
        t.Fatalf("expected HOME=/root got '%s'", env[0])
    }
    if env[1] != "TERM=xterm" {
        t.Fatalf("expected TERM=xterm got '%s'", env[1])
    }
}
vendor/github.com/docker/docker-ce/components/engine/container/health.go (new file, 82 lines; generated, vendored)
@@ -0,0 +1,82 @@
package container // import "github.com/docker/docker/container"

import (
    "sync"

    "github.com/docker/docker/api/types"
    "github.com/sirupsen/logrus"
)

// Health holds the current container health-check state
type Health struct {
    types.Health
    stop chan struct{} // Write struct{} to stop the monitor
    mu   sync.Mutex
}

// String returns a human-readable description of the health-check state
func (s *Health) String() string {
    status := s.Status()

    switch status {
    case types.Starting:
        return "health: starting"
    default: // Healthy and Unhealthy are clear on their own
        return s.Health.Status
    }
}

// Status returns the current health status.
//
// Note that this takes a lock and the value may change after being read.
func (s *Health) Status() string {
    s.mu.Lock()
    defer s.mu.Unlock()

    // This happens when the monitor has yet to be setup.
    if s.Health.Status == "" {
        return types.Unhealthy
    }

    return s.Health.Status
}

// SetStatus writes the current status to the underlying health structure,
// obeying the locking semantics.
//
// Status may be set directly if another lock is used.
func (s *Health) SetStatus(new string) {
    s.mu.Lock()
    defer s.mu.Unlock()

    s.Health.Status = new
}

// OpenMonitorChannel creates and returns a new monitor channel. If there
// already is one, it returns nil.
func (s *Health) OpenMonitorChannel() chan struct{} {
    s.mu.Lock()
    defer s.mu.Unlock()

    if s.stop == nil {
        logrus.Debug("OpenMonitorChannel")
        s.stop = make(chan struct{})
        return s.stop
    }
    return nil
}

// CloseMonitorChannel closes any existing monitor channel.
func (s *Health) CloseMonitorChannel() {
    s.mu.Lock()
    defer s.mu.Unlock()

    if s.stop != nil {
        logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
        close(s.stop)
        s.stop = nil
        // unhealthy when the monitor has stopped for compatibility reasons
        s.Health.Status = types.Unhealthy
        logrus.Debug("CloseMonitorChannel done")
    }
}
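The open/close pair above is a reusable pattern: a mutex-guarded stop channel that is created at most once and closed (not written to) to signal shutdown. A minimal standalone sketch of the same pattern, with hypothetical names, not the Health type itself:

package main

import (
    "fmt"
    "sync"
    "time"
)

// monitor mirrors Health's stop-channel handling: open returns a fresh
// channel only if none is outstanding; close signals by closing it.
type monitor struct {
    mu   sync.Mutex
    stop chan struct{}
}

func (m *monitor) open() chan struct{} {
    m.mu.Lock()
    defer m.mu.Unlock()
    if m.stop == nil {
        m.stop = make(chan struct{})
        return m.stop
    }
    return nil // already open
}

func (m *monitor) close() {
    m.mu.Lock()
    defer m.mu.Unlock()
    if m.stop != nil {
        close(m.stop) // wakes every receiver at once
        m.stop = nil
    }
}

func main() {
    m := &monitor{}
    stop := m.open()
    go func() {
        <-stop
        fmt.Println("probe loop stopped")
    }()
    time.Sleep(10 * time.Millisecond)
    m.close()
    time.Sleep(10 * time.Millisecond)
}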
vendor/github.com/docker/docker-ce/components/engine/container/history.go (new file, 30 lines; generated, vendored)
@@ -0,0 +1,30 @@
package container // import "github.com/docker/docker/container"

import "sort"

// History is a convenience type for storing a list of containers,
// sorted by creation date in descending order.
type History []*Container

// Len returns the number of containers in the history.
func (history *History) Len() int {
    return len(*history)
}

// Less compares two containers and returns true if the second one
// was created before the first one.
func (history *History) Less(i, j int) bool {
    containers := *history
    return containers[j].Created.Before(containers[i].Created)
}

// Swap switches containers i and j positions in the history.
func (history *History) Swap(i, j int) {
    containers := *history
    containers[i], containers[j] = containers[j], containers[i]
}

// sort orders the history by creation date in descending order.
func (history *History) sort() {
    sort.Sort(history)
}
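History gets its newest-first order by implementing sort.Interface with an inverted Less (it compares j before i). A self-contained sketch of the same trick on a throwaway type, runnable outside the container package:

package main

import (
    "fmt"
    "sort"
    "time"
)

type item struct{ created time.Time }

// byNewest mirrors History: sort.Interface over a pointer to a slice of
// pointers, with Less inverted so the newest element sorts first.
type byNewest []*item

func (h *byNewest) Len() int      { return len(*h) }
func (h *byNewest) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
func (h *byNewest) Less(i, j int) bool {
    items := *h
    return items[j].created.Before(items[i].created)
}

func main() {
    h := byNewest{
        {created: time.Now().Add(-time.Hour)},
        {created: time.Now()},
    }
    sort.Sort(&h)
    fmt.Println(h[0].created.After(h[1].created)) // true: newest first
}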
vendor/github.com/docker/docker-ce/components/engine/container/memory_store.go (new file, 95 lines; generated, vendored)
@@ -0,0 +1,95 @@
package container // import "github.com/docker/docker/container"

import (
    "sync"
)

// memoryStore implements a Store in memory.
type memoryStore struct {
    s map[string]*Container
    sync.RWMutex
}

// NewMemoryStore initializes a new memory store.
func NewMemoryStore() Store {
    return &memoryStore{
        s: make(map[string]*Container),
    }
}

// Add appends a new container to the memory store.
// It overrides the id if it existed before.
func (c *memoryStore) Add(id string, cont *Container) {
    c.Lock()
    c.s[id] = cont
    c.Unlock()
}

// Get returns a container from the store by id.
func (c *memoryStore) Get(id string) *Container {
    var res *Container
    c.RLock()
    res = c.s[id]
    c.RUnlock()
    return res
}

// Delete removes a container from the store by id.
func (c *memoryStore) Delete(id string) {
    c.Lock()
    delete(c.s, id)
    c.Unlock()
}

// List returns a sorted list of containers from the store.
// The containers are ordered by creation date.
func (c *memoryStore) List() []*Container {
    containers := History(c.all())
    containers.sort()
    return containers
}

// Size returns the number of containers in the store.
func (c *memoryStore) Size() int {
    c.RLock()
    defer c.RUnlock()
    return len(c.s)
}

// First returns the first container found in the store by a given filter.
func (c *memoryStore) First(filter StoreFilter) *Container {
    for _, cont := range c.all() {
        if filter(cont) {
            return cont
        }
    }
    return nil
}

// ApplyAll calls the reducer function with every container in the store.
// This operation is asynchronous in the memory store.
// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
func (c *memoryStore) ApplyAll(apply StoreReducer) {
    wg := new(sync.WaitGroup)
    for _, cont := range c.all() {
        wg.Add(1)
        go func(container *Container) {
            apply(container)
            wg.Done()
        }(cont)
    }

    wg.Wait()
}

func (c *memoryStore) all() []*Container {
    c.RLock()
    containers := make([]*Container, 0, len(c.s))
    for _, cont := range c.s {
        containers = append(containers, cont)
    }
    c.RUnlock()
    return containers
}

var _ Store = &memoryStore{}
vendor/github.com/docker/docker-ce/components/engine/container/memory_store_test.go (new file, 106 lines; generated, vendored)
@@ -0,0 +1,106 @@
package container // import "github.com/docker/docker/container"

import (
    "testing"
    "time"
)

func TestNewMemoryStore(t *testing.T) {
    s := NewMemoryStore()
    m, ok := s.(*memoryStore)
    if !ok {
        t.Fatalf("store is not a memory store %v", s)
    }
    if m.s == nil {
        t.Fatal("expected store map to not be nil")
    }
}

func TestAddContainers(t *testing.T) {
    s := NewMemoryStore()
    s.Add("id", NewBaseContainer("id", "root"))
    if s.Size() != 1 {
        t.Fatalf("expected store size 1, got %v", s.Size())
    }
}

func TestGetContainer(t *testing.T) {
    s := NewMemoryStore()
    s.Add("id", NewBaseContainer("id", "root"))
    c := s.Get("id")
    if c == nil {
        t.Fatal("expected container to not be nil")
    }
}

func TestDeleteContainer(t *testing.T) {
    s := NewMemoryStore()
    s.Add("id", NewBaseContainer("id", "root"))
    s.Delete("id")
    if c := s.Get("id"); c != nil {
        t.Fatalf("expected container to be nil after removal, got %v", c)
    }

    if s.Size() != 0 {
        t.Fatalf("expected store size to be 0, got %v", s.Size())
    }
}

func TestListContainers(t *testing.T) {
    s := NewMemoryStore()

    cont := NewBaseContainer("id", "root")
    cont.Created = time.Now()
    cont2 := NewBaseContainer("id2", "root")
    cont2.Created = time.Now().Add(24 * time.Hour)

    s.Add("id", cont)
    s.Add("id2", cont2)

    list := s.List()
    if len(list) != 2 {
        t.Fatalf("expected list size 2, got %v", len(list))
    }
    if list[0].ID != "id2" {
        t.Fatalf("expected id2, got %v", list[0].ID)
    }
}

func TestFirstContainer(t *testing.T) {
    s := NewMemoryStore()

    s.Add("id", NewBaseContainer("id", "root"))
    s.Add("id2", NewBaseContainer("id2", "root"))

    first := s.First(func(cont *Container) bool {
        return cont.ID == "id2"
    })

    if first == nil {
        t.Fatal("expected container to not be nil")
    }
    if first.ID != "id2" {
        t.Fatalf("expected id2, got %v", first)
    }
}

func TestApplyAllContainer(t *testing.T) {
    s := NewMemoryStore()

    s.Add("id", NewBaseContainer("id", "root"))
    s.Add("id2", NewBaseContainer("id2", "root"))

    s.ApplyAll(func(cont *Container) {
        if cont.ID == "id2" {
            cont.ID = "newID"
        }
    })

    cont := s.Get("id2")
    if cont == nil {
        t.Fatal("expected container to not be nil")
    }
    if cont.ID != "newID" {
        t.Fatalf("expected newID, got %v", cont.ID)
    }
}
vendor/github.com/docker/docker-ce/components/engine/container/monitor.go (new file, 46 lines; generated, vendored)
@@ -0,0 +1,46 @@
package container // import "github.com/docker/docker/container"

import (
    "time"

    "github.com/sirupsen/logrus"
)

const (
    loggerCloseTimeout = 10 * time.Second
)

// Reset puts a container into a state where it can be restarted again.
func (container *Container) Reset(lock bool) {
    if lock {
        container.Lock()
        defer container.Unlock()
    }

    if err := container.CloseStreams(); err != nil {
        logrus.Errorf("%s: %s", container.ID, err)
    }

    // Re-create a brand new stdin pipe once the container exited
    if container.Config.OpenStdin {
        container.StreamConfig.NewInputPipes()
    }

    if container.LogDriver != nil {
        if container.LogCopier != nil {
            exit := make(chan struct{})
            go func() {
                container.LogCopier.Wait()
                close(exit)
            }()
            select {
            case <-time.After(loggerCloseTimeout):
                logrus.Warn("Logger didn't exit in time: logs may be truncated")
            case <-exit:
            }
        }
        container.LogDriver.Close()
        container.LogCopier = nil
        container.LogDriver = nil
    }
}
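The log-copier drain above is a bounded wait: a goroutine closes a channel when the blocking Wait returns, and a select races that against time.After so Reset can never hang on a stuck logger. The same pattern, extracted into a runnable standalone sketch with hypothetical names:

package main

import (
    "fmt"
    "time"
)

// waitWithTimeout mirrors Reset's drain: run the blocking call in a
// goroutine, signal completion by closing a channel, and race it
// against a timeout so shutdown is bounded.
func waitWithTimeout(done func(), timeout time.Duration) {
    exit := make(chan struct{})
    go func() {
        done()
        close(exit)
    }()
    select {
    case <-time.After(timeout):
        fmt.Println("didn't exit in time")
    case <-exit:
        fmt.Println("finished")
    }
}

func main() {
    waitWithTimeout(func() { time.Sleep(5 * time.Millisecond) }, time.Second) // finished
    waitWithTimeout(func() { time.Sleep(time.Second) }, 10*time.Millisecond)  // didn't exit in time
}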
vendor/github.com/docker/docker-ce/components/engine/container/mounts_unix.go (new file, 12 lines; generated, vendored)
@@ -0,0 +1,12 @@
// +build !windows

package container // import "github.com/docker/docker/container"

// Mount contains information for a mount operation.
type Mount struct {
    Source      string `json:"source"`
    Destination string `json:"destination"`
    Writable    bool   `json:"writable"`
    Data        string `json:"data"`
    Propagation string `json:"mountpropagation"`
}
vendor/github.com/docker/docker-ce/components/engine/container/mounts_windows.go (new file, 8 lines; generated, vendored)
@@ -0,0 +1,8 @@
package container // import "github.com/docker/docker/container"

// Mount contains information for a mount operation.
type Mount struct {
    Source      string `json:"source"`
    Destination string `json:"destination"`
    Writable    bool   `json:"writable"`
}
vendor/github.com/docker/docker-ce/components/engine/container/state.go (new file, 410 lines; generated, vendored)
@@ -0,0 +1,410 @@
package container // import "github.com/docker/docker/container"

import (
    "errors"
    "fmt"
    "sync"
    "time"

    "golang.org/x/net/context"

    "github.com/docker/docker/api/types"
    "github.com/docker/go-units"
)

// State holds the current container state, and has methods to get and
// set the state. Container has an embed, which allows all of the
// functions defined against State to run against Container.
type State struct {
    sync.Mutex
    // Note that `Running` and `Paused` are not mutually exclusive:
    // When pausing a container (on Linux), the cgroups freezer is used to suspend
    // all processes in the container. Freezing the process requires the process to
    // be running. As a result, paused containers are both `Running` _and_ `Paused`.
    Running           bool
    Paused            bool
    Restarting        bool
    OOMKilled         bool
    RemovalInProgress bool // No need for this to be persistent on disk.
    Dead              bool
    Pid               int
    ExitCodeValue     int    `json:"ExitCode"`
    ErrorMsg          string `json:"Error"` // contains last known error during container start, stop, or remove
    StartedAt         time.Time
    FinishedAt        time.Time
    Health            *Health

    waitStop   chan struct{}
    waitRemove chan struct{}
}

// StateStatus is used to return container wait results.
// Implements exec.ExitCode interface.
// This type is needed as State includes a sync.Mutex field which makes
// copying it unsafe.
type StateStatus struct {
    exitCode int
    err      error
}

// ExitCode returns current exitcode for the state.
func (s StateStatus) ExitCode() int {
    return s.exitCode
}

// Err returns current error for the state. Returns nil if the container had
// exited on its own.
func (s StateStatus) Err() error {
    return s.err
}

// NewState creates a default state object with a fresh channel for state changes.
func NewState() *State {
    return &State{
        waitStop:   make(chan struct{}),
        waitRemove: make(chan struct{}),
    }
}

// String returns a human-readable description of the state
func (s *State) String() string {
    if s.Running {
        if s.Paused {
            return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
        }
        if s.Restarting {
            return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
        }

        if h := s.Health; h != nil {
            return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
        }

        return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
    }

    if s.RemovalInProgress {
        return "Removal In Progress"
    }

    if s.Dead {
        return "Dead"
    }

    if s.StartedAt.IsZero() {
        return "Created"
    }

    if s.FinishedAt.IsZero() {
        return ""
    }

    return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}

// IsValidHealthString checks if the provided string is a valid container health status or not.
func IsValidHealthString(s string) bool {
    return s == types.Starting ||
        s == types.Healthy ||
        s == types.Unhealthy ||
        s == types.NoHealthcheck
}

// StateString returns a single string to describe state
func (s *State) StateString() string {
    if s.Running {
        if s.Paused {
            return "paused"
        }
        if s.Restarting {
            return "restarting"
        }
        return "running"
    }

    if s.RemovalInProgress {
        return "removing"
    }

    if s.Dead {
        return "dead"
    }

    if s.StartedAt.IsZero() {
        return "created"
    }

    return "exited"
}

// IsValidStateString checks if the provided string is a valid container state or not.
func IsValidStateString(s string) bool {
    if s != "paused" &&
        s != "restarting" &&
        s != "removing" &&
        s != "running" &&
        s != "dead" &&
        s != "created" &&
        s != "exited" {
        return false
    }
    return true
}

// WaitCondition is an enum type for different states to wait for.
type WaitCondition int

// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
    WaitConditionNotRunning WaitCondition = iota
    WaitConditionNextExit
    WaitConditionRemoved
)

// Wait waits until the container is in a certain state indicated by the given
// condition. A context must be used for cancelling the request, controlling
// timeouts, and avoiding goroutine leaks. Wait must be called without holding
// the state lock. Returns a channel from which the caller will receive the
// result. If the container exited on its own, the result's Err() method will
// be nil and its ExitCode() method will return the container's exit code,
// otherwise, the result's Err() method will return an error indicating why the
// wait operation failed.
func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
    s.Lock()
    defer s.Unlock()

    if condition == WaitConditionNotRunning && !s.Running {
        // Buffer so we can put it in the channel now.
        resultC := make(chan StateStatus, 1)

        // Send the current status.
        resultC <- StateStatus{
            exitCode: s.ExitCode(),
            err:      s.Err(),
        }

        return resultC
    }

    // If we are waiting only for removal, the waitStop channel should
    // remain nil and block forever.
    var waitStop chan struct{}
    if condition < WaitConditionRemoved {
        waitStop = s.waitStop
    }

    // Always wait for removal, just in case the container gets removed
    // while it is still in a "created" state, in which case it is never
    // actually stopped.
    waitRemove := s.waitRemove

    resultC := make(chan StateStatus)

    go func() {
        select {
        case <-ctx.Done():
            // Context timeout or cancellation.
            resultC <- StateStatus{
                exitCode: -1,
                err:      ctx.Err(),
            }
            return
        case <-waitStop:
        case <-waitRemove:
        }

        s.Lock()
        result := StateStatus{
            exitCode: s.ExitCode(),
            err:      s.Err(),
        }
        s.Unlock()

        resultC <- result
    }()

    return resultC
}

// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
func (s *State) IsRunning() bool {
    s.Lock()
    res := s.Running
    s.Unlock()
    return res
}

// GetPID returns the process ID of the container.
func (s *State) GetPID() int {
    s.Lock()
    res := s.Pid
    s.Unlock()
    return res
}

// ExitCode returns current exitcode for the state. Take lock before if state
// may be shared.
func (s *State) ExitCode() int {
    return s.ExitCodeValue
}

// SetExitCode sets current exitcode for the state. Take lock before if state
// may be shared.
func (s *State) SetExitCode(ec int) {
    s.ExitCodeValue = ec
}

// SetRunning sets the state of the container to "running".
func (s *State) SetRunning(pid int, initial bool) {
    s.ErrorMsg = ""
    s.Paused = false
    s.Running = true
    s.Restarting = false
    if initial {
        s.Paused = false
    }
    s.ExitCodeValue = 0
    s.Pid = pid
    if initial {
        s.StartedAt = time.Now().UTC()
    }
}

// SetStopped sets the container state to "stopped" without locking.
func (s *State) SetStopped(exitStatus *ExitStatus) {
    s.Running = false
    s.Paused = false
    s.Restarting = false
    s.Pid = 0
    if exitStatus.ExitedAt.IsZero() {
        s.FinishedAt = time.Now().UTC()
    } else {
        s.FinishedAt = exitStatus.ExitedAt
    }
    s.ExitCodeValue = exitStatus.ExitCode
    s.OOMKilled = exitStatus.OOMKilled
    close(s.waitStop) // fire waiters for stop
    s.waitStop = make(chan struct{})
}

// SetRestarting sets the container state to "restarting" without locking.
// It also sets the container PID to 0.
func (s *State) SetRestarting(exitStatus *ExitStatus) {
    // we should consider the container running when it is restarting because of
    // all the checks in docker around rm/stop/etc
    s.Running = true
    s.Restarting = true
    s.Paused = false
    s.Pid = 0
    s.FinishedAt = time.Now().UTC()
    s.ExitCodeValue = exitStatus.ExitCode
    s.OOMKilled = exitStatus.OOMKilled
    close(s.waitStop) // fire waiters for stop
    s.waitStop = make(chan struct{})
}

// SetError sets the container's error state. This is useful when we want to
// know the error that occurred when the container transitions to another state
// when inspecting it
|
||||
func (s *State) SetError(err error) {
|
||||
s.ErrorMsg = ""
|
||||
if err != nil {
|
||||
s.ErrorMsg = err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
// IsPaused returns whether the container is paused or not.
|
||||
func (s *State) IsPaused() bool {
|
||||
s.Lock()
|
||||
res := s.Paused
|
||||
s.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// IsRestarting returns whether the container is restarting or not.
|
||||
func (s *State) IsRestarting() bool {
|
||||
s.Lock()
|
||||
res := s.Restarting
|
||||
s.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// SetRemovalInProgress sets the container state as being removed.
|
||||
// It returns true if the container was already in that state.
|
||||
func (s *State) SetRemovalInProgress() bool {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.RemovalInProgress {
|
||||
return true
|
||||
}
|
||||
s.RemovalInProgress = true
|
||||
return false
|
||||
}
|
||||
|
||||
// ResetRemovalInProgress makes the RemovalInProgress state to false.
|
||||
func (s *State) ResetRemovalInProgress() {
|
||||
s.Lock()
|
||||
s.RemovalInProgress = false
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
// IsRemovalInProgress returns whether the RemovalInProgress flag is set.
|
||||
// Used by Container to check whether a container is being removed.
|
||||
func (s *State) IsRemovalInProgress() bool {
|
||||
s.Lock()
|
||||
res := s.RemovalInProgress
|
||||
s.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// SetDead sets the container state to "dead"
|
||||
func (s *State) SetDead() {
|
||||
s.Lock()
|
||||
s.Dead = true
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead.
|
||||
func (s *State) IsDead() bool {
|
||||
s.Lock()
|
||||
res := s.Dead
|
||||
s.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// SetRemoved assumes this container is already in the "dead" state and
|
||||
// closes the internal waitRemove channel to unblock callers waiting for a
|
||||
// container to be removed.
|
||||
func (s *State) SetRemoved() {
|
||||
s.SetRemovalError(nil)
|
||||
}
|
||||
|
||||
// SetRemovalError is to be called in case a container remove failed.
|
||||
// It sets an error and closes the internal waitRemove channel to unblock
|
||||
// callers waiting for the container to be removed.
|
||||
func (s *State) SetRemovalError(err error) {
|
||||
s.SetError(err)
|
||||
s.Lock()
|
||||
close(s.waitRemove) // Unblock those waiting on remove.
|
||||
// Recreate the channel so next ContainerWait will work
|
||||
s.waitRemove = make(chan struct{})
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
// Err returns an error if there is one.
|
||||
func (s *State) Err() error {
|
||||
if s.ErrorMsg != "" {
|
||||
return errors.New(s.ErrorMsg)
|
||||
}
|
||||
return nil
|
||||
}
|
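Editor's note: the Wait machinery above is easiest to see from the caller's side. Below is a minimal sketch of the intended calling pattern; the helper name waitNotRunning and the 10-second budget are illustrative assumptions, not part of the vendored API.

// waitNotRunning is a hypothetical helper, shown only to illustrate how
// State.Wait is meant to be consumed.
func waitNotRunning(ctx context.Context, s *State) (int, error) {
	// Bound the wait so an abandoned caller cannot block forever on the
	// result channel (the 10-second budget is an assumption).
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// Wait must be called without holding the state lock; it returns a
	// channel that yields exactly one StateStatus.
	status := <-s.Wait(ctx, WaitConditionNotRunning)

	// Err() is nil when the container reached a non-running state on its
	// own; on timeout or cancellation it carries ctx.Err() and the exit
	// code is -1.
	return status.ExitCode(), status.Err()
}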
192 vendor/github.com/docker/docker-ce/components/engine/container/state_test.go generated vendored Normal file
@@ -0,0 +1,192 @@
package container // import "github.com/docker/docker/container"

import (
	"context"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
)

func TestIsValidHealthString(t *testing.T) {
	contexts := []struct {
		Health   string
		Expected bool
	}{
		{types.Healthy, true},
		{types.Unhealthy, true},
		{types.Starting, true},
		{types.NoHealthcheck, true},
		{"fail", false},
	}

	for _, c := range contexts {
		v := IsValidHealthString(c.Health)
		if v != c.Expected {
			t.Fatalf("Expected %t, but got %t", c.Expected, v)
		}
	}
}

func TestStateRunStop(t *testing.T) {
	s := NewState()

	// Begin a wait with WaitConditionRemoved. It should complete
	// within 200 milliseconds.
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	removalWait := s.Wait(ctx, WaitConditionRemoved)

	// Full lifecycle two times.
	for i := 1; i <= 2; i++ {
		// A wait with WaitConditionNotRunning should return
		// immediately since the state is now either "created" (on the
		// first iteration) or "exited" (on the second iteration). It
		// shouldn't take more than 50 milliseconds.
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		defer cancel()
		// Expect the exit code to be i-1 since it should be the exit
		// code from the previous loop or 0 for the created state.
		if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 {
			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err())
		}

		// A wait with WaitConditionNextExit should block until the
		// container has started and exited. It shouldn't take more
		// than 100 milliseconds.
		ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()
		initialWait := s.Wait(ctx, WaitConditionNextExit)

		// Set the state to "Running".
		s.Lock()
		s.SetRunning(i, true)
		s.Unlock()

		// Assert desired state.
		if !s.IsRunning() {
			t.Fatal("State not running")
		}
		if s.Pid != i {
			t.Fatalf("Pid %v, expected %v", s.Pid, i)
		}
		if s.ExitCode() != 0 {
			t.Fatalf("ExitCode %v, expected 0", s.ExitCode())
		}

		// Now that it's running, a wait with WaitConditionNotRunning
		// should block until we stop the container. It shouldn't take
		// more than 100 milliseconds.
		ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()
		exitWait := s.Wait(ctx, WaitConditionNotRunning)

		// Set the state to "Exited".
		s.Lock()
		s.SetStopped(&ExitStatus{ExitCode: i})
		s.Unlock()

		// Assert desired state.
		if s.IsRunning() {
			t.Fatal("State is running")
		}
		if s.ExitCode() != i {
			t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i)
		}
		if s.Pid != 0 {
			t.Fatalf("Pid %v, expected 0", s.Pid)
		}

		// Receive the initialWait result.
		if status := <-initialWait; status.ExitCode() != i {
			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
		}

		// Receive the exitWait result.
		if status := <-exitWait; status.ExitCode() != i {
			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
		}
	}

	// Set the state to dead and removed.
	s.SetDead()
	s.SetRemoved()

	// Wait for removed status or timeout.
	if status := <-removalWait; status.ExitCode() != 2 {
		// Should have the final exit code from the loop.
		t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err())
	}
}

func TestStateTimeoutWait(t *testing.T) {
	s := NewState()

	s.Lock()
	s.SetRunning(0, true)
	s.Unlock()

	// Start a wait with a timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	waitC := s.Wait(ctx, WaitConditionNotRunning)

	// It should timeout *before* this 200ms timer does.
	select {
	case <-time.After(200 * time.Millisecond):
		t.Fatal("Stop callback doesn't fire in 200 milliseconds")
	case status := <-waitC:
		t.Log("Stop callback fired")
		// Should be a timeout error.
		if status.Err() == nil {
			t.Fatal("expected timeout error, got nil")
		}
		if status.ExitCode() != -1 {
			t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode())
		}
	}

	s.Lock()
	s.SetStopped(&ExitStatus{ExitCode: 0})
	s.Unlock()

	// Start another wait with a timeout. This one should return
	// immediately.
	ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	waitC = s.Wait(ctx, WaitConditionNotRunning)

	select {
	case <-time.After(200 * time.Millisecond):
		t.Fatal("Stop callback doesn't fire in 200 milliseconds")
	case status := <-waitC:
		t.Log("Stop callback fired")
		if status.ExitCode() != 0 {
			t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err())
		}
	}
}

func TestIsValidStateString(t *testing.T) {
	states := []struct {
		state    string
		expected bool
	}{
		{"paused", true},
		{"restarting", true},
		{"running", true},
		{"dead", true},
		{"start", false},
		{"created", true},
		{"exited", true},
		{"removing", true},
		{"stop", false},
	}

	for _, s := range states {
		v := IsValidStateString(s.state)
		if v != s.expected {
			t.Fatalf("Expected %t, but got %t", s.expected, v)
		}
	}
}
28 vendor/github.com/docker/docker-ce/components/engine/container/store.go generated vendored Normal file
@@ -0,0 +1,28 @@
package container // import "github.com/docker/docker/container"

// StoreFilter defines a function to filter
// containers in the store.
type StoreFilter func(*Container) bool

// StoreReducer defines a function to
// manipulate containers in the store.
type StoreReducer func(*Container)

// Store defines an interface that
// any container store must implement.
type Store interface {
	// Add appends a new container to the store.
	Add(string, *Container)
	// Get returns a container from the store by the identifier it was stored with.
	Get(string) *Container
	// Delete removes a container from the store by the identifier it was stored with.
	Delete(string)
	// List returns a list of containers from the store.
	List() []*Container
	// Size returns the number of containers in the store.
	Size() int
	// First returns the first container found in the store by a given filter.
	First(StoreFilter) *Container
	// ApplyAll calls the reducer function with every container in the store.
	ApplyAll(StoreReducer)
}
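Editor's note: to make the Store contract concrete, here is a toy, map-backed implementation. It is a sketch for illustration only; memoryStore is an assumed name (the engine's real store lives elsewhere in the tree), and it requires the standard "sync" package.

// memoryStore is a hypothetical minimal Store implementation, shown only
// to illustrate the interface contract above.
type memoryStore struct {
	mu sync.RWMutex
	m  map[string]*Container
}

func (s *memoryStore) Add(id string, c *Container) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[id] = c
}

func (s *memoryStore) Get(id string) *Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[id]
}

func (s *memoryStore) Delete(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.m, id)
}

func (s *memoryStore) List() []*Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]*Container, 0, len(s.m))
	for _, c := range s.m {
		out = append(out, c)
	}
	return out
}

func (s *memoryStore) Size() int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return len(s.m)
}

func (s *memoryStore) First(filter StoreFilter) *Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, c := range s.m {
		if filter(c) {
			return c
		}
	}
	return nil
}

func (s *memoryStore) ApplyAll(reducer StoreReducer) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, c := range s.m {
		reducer(c)
	}
}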
185 vendor/github.com/docker/docker-ce/components/engine/container/stream/attach.go generated vendored Normal file
@@ -0,0 +1,185 @@
package stream // import "github.com/docker/docker/container/stream"

import (
	"io"
	"sync"

	"golang.org/x/net/context"

	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/term"
	"github.com/sirupsen/logrus"
)

var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q

// AttachConfig is the config struct used to attach a client to a stream's stdio.
type AttachConfig struct {
	// TTY tells the attach copier that the stream's stdin is a TTY and to look for
	// escape sequences in stdin to detach from the stream.
	// When true the escape sequence is not passed to the underlying stream.
	TTY bool
	// DetachKeys specifies the detach keys the client will be using.
	// Only useful when `TTY` is true.
	DetachKeys []byte

	// CloseStdin signals that once done, stdin for the attached stream should be closed.
	// For example, this would close the attached container's stdin.
	CloseStdin bool

	// UseStd* indicate whether the client has requested to be connected to the
	// given stream or not. These flags are used instead of checking Std* != nil
	// at points before the client streams Std* are wired up.
	UseStdin, UseStdout, UseStderr bool

	// CStd* are the streams directly connected to the container.
	CStdin           io.WriteCloser
	CStdout, CStderr io.ReadCloser

	// Stdin, Stdout and Stderr are the client streams to wire up to.
	Stdin          io.ReadCloser
	Stdout, Stderr io.Writer
}

// AttachStreams attaches the container's streams to the AttachConfig.
func (c *Config) AttachStreams(cfg *AttachConfig) {
	if cfg.UseStdin {
		cfg.CStdin = c.StdinPipe()
	}

	if cfg.UseStdout {
		cfg.CStdout = c.StdoutPipe()
	}

	if cfg.UseStderr {
		cfg.CStderr = c.StderrPipe()
	}
}

// CopyStreams starts goroutines to copy data in and out to/from the container.
func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error {
	var (
		wg     sync.WaitGroup
		errors = make(chan error, 3)
	)

	if cfg.Stdin != nil {
		wg.Add(1)
	}

	if cfg.Stdout != nil {
		wg.Add(1)
	}

	if cfg.Stderr != nil {
		wg.Add(1)
	}

	// Connect stdin of container to the attach stdin stream.
	go func() {
		if cfg.Stdin == nil {
			return
		}
		logrus.Debug("attach: stdin: begin")

		var err error
		if cfg.TTY {
			_, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys)
		} else {
			_, err = pools.Copy(cfg.CStdin, cfg.Stdin)
		}
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: stdin: %s", err)
			errors <- err
		}
		if cfg.CloseStdin && !cfg.TTY {
			cfg.CStdin.Close()
		} else {
			// No matter what, when stdin is closed (io.Copy unblocks), close stdout and stderr.
			if cfg.CStdout != nil {
				cfg.CStdout.Close()
			}
			if cfg.CStderr != nil {
				cfg.CStderr.Close()
			}
		}
		logrus.Debug("attach: stdin: end")
		wg.Done()
	}()

	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
		if stream == nil {
			return
		}

		logrus.Debugf("attach: %s: begin", name)
		_, err := pools.Copy(stream, streamPipe)
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: %s: %v", name, err)
			errors <- err
		}
		// Make sure stdin gets closed.
		if cfg.Stdin != nil {
			cfg.Stdin.Close()
		}
		streamPipe.Close()
		logrus.Debugf("attach: %s: end", name)
		wg.Done()
	}

	go attachStream("stdout", cfg.Stdout, cfg.CStdout)
	go attachStream("stderr", cfg.Stderr, cfg.CStderr)

	errs := make(chan error, 1)

	go func() {
		defer close(errs)
		errs <- func() error {
			done := make(chan struct{})
			go func() {
				wg.Wait()
				close(done)
			}()
			select {
			case <-done:
			case <-ctx.Done():
				// Close all pipes.
				if cfg.CStdin != nil {
					cfg.CStdin.Close()
				}
				if cfg.CStdout != nil {
					cfg.CStdout.Close()
				}
				if cfg.CStderr != nil {
					cfg.CStderr.Close()
				}
				<-done
			}
			close(errors)
			for err := range errors {
				if err != nil {
					return err
				}
			}
			return nil
		}()
	}()

	return errs
}

func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) {
	if len(keys) == 0 {
		keys = defaultEscapeSequence
	}
	pr := term.NewEscapeProxy(src, keys)
	defer src.Close()

	return pools.Copy(dst, pr)
}
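Editor's note: a sketch of the caller side of this attach path, assuming a *Config already wired to a container. The AttachConfig fields are the real ones above, but the surrounding attach function is hypothetical.

// attach is a hypothetical wiring function for the attach path above.
func attach(ctx context.Context, c *Config, stdin io.ReadCloser, stdout, stderr io.Writer) error {
	cfg := &AttachConfig{
		UseStdin:  stdin != nil,
		UseStdout: stdout != nil,
		UseStderr: stderr != nil,
		Stdin:     stdin,
		Stdout:    stdout,
		Stderr:    stderr,
	}

	// AttachStreams fills in the container-side CStdin/CStdout/CStderr
	// pipes for every stream the client asked for.
	c.AttachStreams(cfg)

	// CopyStreams starts the copier goroutines and returns a channel
	// that yields a single value (the first copy error, or nil) once
	// everything has drained or the context is cancelled.
	return <-c.CopyStreams(ctx, cfg)
}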
146 vendor/github.com/docker/docker-ce/components/engine/container/stream/streams.go generated vendored Normal file
@@ -0,0 +1,146 @@
package stream // import "github.com/docker/docker/container/stream"

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync"

	"github.com/containerd/containerd/cio"
	"github.com/docker/docker/pkg/broadcaster"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/sirupsen/logrus"
)

// Config holds information about I/O streams managed together.
//
// Config.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the stream's active process.
// Config.StdoutPipe and Config.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".
type Config struct {
	sync.WaitGroup
	stdout    *broadcaster.Unbuffered
	stderr    *broadcaster.Unbuffered
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
}

// NewConfig creates a stream config and initializes
// the standard err and standard out to new unbuffered broadcasters.
func NewConfig() *Config {
	return &Config{
		stderr: new(broadcaster.Unbuffered),
		stdout: new(broadcaster.Unbuffered),
	}
}

// Stdout returns the standard output in the configuration.
func (c *Config) Stdout() *broadcaster.Unbuffered {
	return c.stdout
}

// Stderr returns the standard error in the configuration.
func (c *Config) Stderr() *broadcaster.Unbuffered {
	return c.stderr
}

// Stdin returns the standard input in the configuration.
func (c *Config) Stdin() io.ReadCloser {
	return c.stdin
}

// StdinPipe returns an input writer pipe as an io.WriteCloser.
func (c *Config) StdinPipe() io.WriteCloser {
	return c.stdinPipe
}

// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe.
// It adds this new out pipe to the Stdout broadcaster.
// This will block stdout if unconsumed.
func (c *Config) StdoutPipe() io.ReadCloser {
	bytesPipe := ioutils.NewBytesPipe()
	c.stdout.Add(bytesPipe)
	return bytesPipe
}

// StderrPipe creates a new io.ReadCloser with an empty bytes pipe.
// It adds this new err pipe to the Stderr broadcaster.
// This will block stderr if unconsumed.
func (c *Config) StderrPipe() io.ReadCloser {
	bytesPipe := ioutils.NewBytesPipe()
	c.stderr.Add(bytesPipe)
	return bytesPipe
}

// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe.
func (c *Config) NewInputPipes() {
	c.stdin, c.stdinPipe = io.Pipe()
}

// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
func (c *Config) NewNopInputPipe() {
	c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
}

// CloseStreams ensures that the configured streams are properly closed.
func (c *Config) CloseStreams() error {
	var errors []string

	if c.stdin != nil {
		if err := c.stdin.Close(); err != nil {
			errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
		}
	}

	if err := c.stdout.Clean(); err != nil {
		errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
	}

	if err := c.stderr.Clean(); err != nil {
		errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
	}

	if len(errors) > 0 {
		// Use an explicit format verb so a "%" inside an error message
		// cannot be misinterpreted as a formatting directive.
		return fmt.Errorf("%s", strings.Join(errors, "\n"))
	}

	return nil
}

// CopyToPipe connects the stream config with a libcontainerd IO pipe.
func (c *Config) CopyToPipe(iop *cio.DirectIO) {
	copyFunc := func(w io.Writer, r io.ReadCloser) {
		c.Add(1)
		go func() {
			if _, err := pools.Copy(w, r); err != nil {
				logrus.Errorf("stream copy error: %v", err)
			}
			r.Close()
			c.Done()
		}()
	}

	if iop.Stdout != nil {
		copyFunc(c.Stdout(), iop.Stdout)
	}
	if iop.Stderr != nil {
		copyFunc(c.Stderr(), iop.Stderr)
	}

	if stdin := c.Stdin(); stdin != nil {
		if iop.Stdin != nil {
			go func() {
				pools.Copy(iop.Stdin, stdin)
				if err := iop.Stdin.Close(); err != nil {
					logrus.Warnf("failed to close stdin: %v", err)
				}
			}()
		}
	}
}
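Editor's note: the broadcaster design means every StdoutPipe consumer sees its own full copy of the container's output. A small sketch of that fan-out; fanOutExample is a hypothetical function, logFile stands in for any assumed second consumer, and "os" is an extra import the sketch needs.

// fanOutExample is illustrative only: two consumers of the same stdout
// broadcaster each receive a full copy of whatever is written.
func fanOutExample(logFile io.Writer) {
	c := NewConfig()
	r1 := c.StdoutPipe() // e.g. an attached CLI client
	r2 := c.StdoutPipe() // e.g. a logging driver

	go io.Copy(os.Stdout, r1) // each pipe gets its own full copy
	go io.Copy(logFile, r2)

	// Anything written to the stdout broadcaster reaches both pipes.
	io.WriteString(c.Stdout(), "hello from the container\n")
}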
494 vendor/github.com/docker/docker-ce/components/engine/container/view.go generated vendored Normal file
@@ -0,0 +1,494 @@
package container // import "github.com/docker/docker/container"

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/go-connections/nat"
	"github.com/hashicorp/go-memdb"
	"github.com/sirupsen/logrus"
)

const (
	memdbContainersTable  = "containers"
	memdbNamesTable       = "names"
	memdbIDIndex          = "id"
	memdbContainerIDIndex = "containerid"
)

var (
	// ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved.
	ErrNameReserved = errors.New("name is reserved")
	// ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved.
	ErrNameNotReserved = errors.New("name is not reserved")
)

// Snapshot is a read-only view for Containers. It holds all information necessary to serve container queries in a
// versioned ACID in-memory store.
type Snapshot struct {
	types.Container

	// additional info queries need to filter on;
	// preserve nanosecond resolution for queries
	CreatedAt    time.Time
	StartedAt    time.Time
	Name         string
	Pid          int
	ExitCode     int
	Running      bool
	Paused       bool
	Managed      bool
	ExposedPorts nat.PortSet
	PortBindings nat.PortSet
	Health       string
	HostConfig   struct {
		Isolation string
	}
}

// nameAssociation associates a container id with a name.
type nameAssociation struct {
	// name is the name to associate. Note that name is the primary key
	// ("id" in memdb).
	name        string
	containerID string
}

// ViewDB provides an in-memory transactional (ACID) container store.
type ViewDB interface {
	Snapshot() View
	Save(*Container) error
	Delete(*Container) error

	ReserveName(name, containerID string) error
	ReleaseName(name string) error
}

// View can be used by readers to avoid locking.
type View interface {
	All() ([]Snapshot, error)
	Get(id string) (*Snapshot, error)

	GetID(name string) (string, error)
	GetAllNames() map[string][]string
}

var schema = &memdb.DBSchema{
	Tables: map[string]*memdb.TableSchema{
		memdbContainersTable: {
			Name: memdbContainersTable,
			Indexes: map[string]*memdb.IndexSchema{
				memdbIDIndex: {
					Name:    memdbIDIndex,
					Unique:  true,
					Indexer: &containerByIDIndexer{},
				},
			},
		},
		memdbNamesTable: {
			Name: memdbNamesTable,
			Indexes: map[string]*memdb.IndexSchema{
				// Used for names, because "id" is the primary key in memdb.
				memdbIDIndex: {
					Name:    memdbIDIndex,
					Unique:  true,
					Indexer: &namesByNameIndexer{},
				},
				memdbContainerIDIndex: {
					Name:    memdbContainerIDIndex,
					Indexer: &namesByContainerIDIndexer{},
				},
			},
		},
	},
}

type memDB struct {
	store *memdb.MemDB
}

// NoSuchContainerError indicates that the container wasn't found in the
// database.
type NoSuchContainerError struct {
	id string
}

// Error satisfies the error interface.
func (e NoSuchContainerError) Error() string {
	return "no such container " + e.id
}

// NewViewDB provides the default implementation, with the default schema.
func NewViewDB() (ViewDB, error) {
	store, err := memdb.NewMemDB(schema)
	if err != nil {
		return nil, err
	}
	return &memDB{store: store}, nil
}

// Snapshot provides a consistent read-only View of the database.
func (db *memDB) Snapshot() View {
	return &memdbView{
		txn: db.store.Txn(false),
	}
}

func (db *memDB) withTxn(cb func(*memdb.Txn) error) error {
	txn := db.store.Txn(true)
	err := cb(txn)
	if err != nil {
		txn.Abort()
		return err
	}
	txn.Commit()
	return nil
}

// Save atomically updates the in-memory store state for a Container.
// Only read-only (deep) copies of containers may be passed in.
func (db *memDB) Save(c *Container) error {
	return db.withTxn(func(txn *memdb.Txn) error {
		return txn.Insert(memdbContainersTable, c)
	})
}

// Delete removes an item by ID.
func (db *memDB) Delete(c *Container) error {
	return db.withTxn(func(txn *memdb.Txn) error {
		view := &memdbView{txn: txn}
		names := view.getNames(c.ID)

		for _, name := range names {
			txn.Delete(memdbNamesTable, nameAssociation{name: name})
		}

		// Ignore the error - the container may not actually exist in the
		// db, but we still need to clean up associated names.
		txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root))
		return nil
	})
}

// ReserveName registers a container ID to a name.
// ReserveName is idempotent.
// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved`.
// A name reservation is globally unique.
func (db *memDB) ReserveName(name, containerID string) error {
	return db.withTxn(func(txn *memdb.Txn) error {
		s, err := txn.First(memdbNamesTable, memdbIDIndex, name)
		if err != nil {
			return err
		}
		if s != nil {
			if s.(nameAssociation).containerID != containerID {
				return ErrNameReserved
			}
			return nil
		}
		return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID})
	})
}

// ReleaseName releases the reserved name.
// Once released, a name can be reserved again.
func (db *memDB) ReleaseName(name string) error {
	return db.withTxn(func(txn *memdb.Txn) error {
		return txn.Delete(memdbNamesTable, nameAssociation{name: name})
	})
}

type memdbView struct {
	txn *memdb.Txn
}

// All returns all items in this snapshot. Returned objects must never be modified.
func (v *memdbView) All() ([]Snapshot, error) {
	var all []Snapshot
	iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex)
	if err != nil {
		return nil, err
	}
	for {
		item := iter.Next()
		if item == nil {
			break
		}
		snapshot := v.transform(item.(*Container))
		all = append(all, *snapshot)
	}
	return all, nil
}

// Get returns an item by id. Returned objects must never be modified.
func (v *memdbView) Get(id string) (*Snapshot, error) {
	s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id)
	if err != nil {
		return nil, err
	}
	if s == nil {
		return nil, NoSuchContainerError{id: id}
	}
	return v.transform(s.(*Container)), nil
}

// getNames lists all the reserved names for the given container ID.
func (v *memdbView) getNames(containerID string) []string {
	iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID)
	if err != nil {
		return nil
	}

	var names []string
	for {
		item := iter.Next()
		if item == nil {
			break
		}
		names = append(names, item.(nameAssociation).name)
	}

	return names
}

// GetID returns the container ID that the passed in name is reserved to.
func (v *memdbView) GetID(name string) (string, error) {
	s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name)
	if err != nil {
		return "", err
	}
	if s == nil {
		return "", ErrNameNotReserved
	}
	return s.(nameAssociation).containerID, nil
}

// GetAllNames returns all registered names.
func (v *memdbView) GetAllNames() map[string][]string {
	iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex)
	if err != nil {
		return nil
	}

	out := make(map[string][]string)
	for {
		item := iter.Next()
		if item == nil {
			break
		}
		assoc := item.(nameAssociation)
		out[assoc.containerID] = append(out[assoc.containerID], assoc.name)
	}

	return out
}

// transform maps a (deep) copied Container object to what queries need.
// A lock on the Container is not held because these are immutable deep copies.
func (v *memdbView) transform(container *Container) *Snapshot {
	health := types.NoHealthcheck
	if container.Health != nil {
		health = container.Health.Status()
	}
	snapshot := &Snapshot{
		Container: types.Container{
			ID:      container.ID,
			Names:   v.getNames(container.ID),
			ImageID: container.ImageID.String(),
			Ports:   []types.Port{},
			Mounts:  container.GetMountPoints(),
			State:   container.State.StateString(),
			Status:  container.State.String(),
			Created: container.Created.Unix(),
		},
		CreatedAt:    container.Created,
		StartedAt:    container.StartedAt,
		Name:         container.Name,
		Pid:          container.Pid,
		Managed:      container.Managed,
		ExposedPorts: make(nat.PortSet),
		PortBindings: make(nat.PortSet),
		Health:       health,
		Running:      container.Running,
		Paused:       container.Paused,
		ExitCode:     container.ExitCode(),
	}

	if snapshot.Names == nil {
		// Dead containers will often have no name, so make sure the response isn't null.
		snapshot.Names = []string{}
	}

	if container.HostConfig != nil {
		snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
		for binding := range container.HostConfig.PortBindings {
			snapshot.PortBindings[binding] = struct{}{}
		}
	}

	if container.Config != nil {
		snapshot.Image = container.Config.Image
		snapshot.Labels = container.Config.Labels
		for exposed := range container.Config.ExposedPorts {
			snapshot.ExposedPorts[exposed] = struct{}{}
		}
	}

	if len(container.Args) > 0 {
		args := []string{}
		for _, arg := range container.Args {
			if strings.Contains(arg, " ") {
				args = append(args, fmt.Sprintf("'%s'", arg))
			} else {
				args = append(args, arg)
			}
		}
		argsAsString := strings.Join(args, " ")
		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
	} else {
		snapshot.Command = container.Path
	}

	snapshot.Ports = []types.Port{}
	networks := make(map[string]*network.EndpointSettings)
	if container.NetworkSettings != nil {
		for name, netw := range container.NetworkSettings.Networks {
			if netw == nil || netw.EndpointSettings == nil {
				continue
			}
			networks[name] = &network.EndpointSettings{
				EndpointID:          netw.EndpointID,
				Gateway:             netw.Gateway,
				IPAddress:           netw.IPAddress,
				IPPrefixLen:         netw.IPPrefixLen,
				IPv6Gateway:         netw.IPv6Gateway,
				GlobalIPv6Address:   netw.GlobalIPv6Address,
				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
				MacAddress:          netw.MacAddress,
				NetworkID:           netw.NetworkID,
			}
			if netw.IPAMConfig != nil {
				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
					IPv4Address: netw.IPAMConfig.IPv4Address,
					IPv6Address: netw.IPAMConfig.IPv6Address,
				}
			}
		}
		for port, bindings := range container.NetworkSettings.Ports {
			p, err := nat.ParsePort(port.Port())
			if err != nil {
				logrus.Warnf("invalid port map %+v", err)
				continue
			}
			if len(bindings) == 0 {
				snapshot.Ports = append(snapshot.Ports, types.Port{
					PrivatePort: uint16(p),
					Type:        port.Proto(),
				})
				continue
			}
			for _, binding := range bindings {
				h, err := nat.ParsePort(binding.HostPort)
				if err != nil {
					logrus.Warnf("invalid host port map %+v", err)
					continue
				}
				snapshot.Ports = append(snapshot.Ports, types.Port{
					PrivatePort: uint16(p),
					PublicPort:  uint16(h),
					Type:        port.Proto(),
					IP:          binding.HostIP,
				})
			}
		}
	}
	snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}

	return snapshot
}

// containerByIDIndexer is used to extract the ID field from Container types.
// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct.
type containerByIDIndexer struct{}

// FromObject implements the memdb.SingleIndexer interface for Container objects.
func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
	c, ok := obj.(*Container)
	if !ok {
		return false, nil, fmt.Errorf("%T is not a Container", obj)
	}
	// Add the null character as a terminator.
	v := c.ID + "\x00"
	return true, []byte(v), nil
}

// FromArgs implements the memdb.Indexer interface.
func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide only a single argument")
	}
	arg, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	// Add the null character as a terminator.
	arg += "\x00"
	return []byte(arg), nil
}

// namesByNameIndexer is used to index container name associations by name.
type namesByNameIndexer struct{}

func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) {
	n, ok := obj.(nameAssociation)
	if !ok {
		return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
	}

	// Add the null character as a terminator.
	return true, []byte(n.name + "\x00"), nil
}

func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide only a single argument")
	}
	arg, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	// Add the null character as a terminator.
	arg += "\x00"
	return []byte(arg), nil
}

// namesByContainerIDIndexer is used to index container names by container ID.
type namesByContainerIDIndexer struct{}

func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
	n, ok := obj.(nameAssociation)
	if !ok {
		return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
	}

	// Add the null character as a terminator.
	return true, []byte(n.containerID + "\x00"), nil
}

func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide only a single argument")
	}
	arg, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	// Add the null character as a terminator.
	arg += "\x00"
	return []byte(arg), nil
}
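Editor's note: the intended read path for this store is to checkpoint containers into the ViewDB and then query an immutable Snapshot without taking any container locks. A brief sketch; listContainers is a hypothetical helper, and db is assumed to be a ViewDB already populated via Container.CheckpointTo.

// listContainers is a hypothetical helper showing the lock-free read
// path: one memdb read transaction backs the whole View, so the results
// form a consistent snapshot even while containers keep changing.
func listContainers(db ViewDB) ([]Snapshot, error) {
	view := db.Snapshot()
	all, err := view.All()
	if err != nil {
		return nil, err
	}
	// Returned Snapshots are deep copies and must never be modified.
	return all, nil
}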
186 vendor/github.com/docker/docker-ce/components/engine/container/view_test.go generated vendored Normal file
@@ -0,0 +1,186 @@
package container // import "github.com/docker/docker/container"

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
	"github.com/pborman/uuid"
)

var root string

func TestMain(m *testing.M) {
	var err error
	root, err = ioutil.TempDir("", "docker-container-test-")
	if err != nil {
		panic(err)
	}
	// os.Exit skips deferred calls, so clean up explicitly before exiting.
	code := m.Run()
	os.RemoveAll(root)
	os.Exit(code)
}

func newContainer(t *testing.T) *Container {
	var (
		id    = uuid.New()
		cRoot = filepath.Join(root, id)
	)
	if err := os.MkdirAll(cRoot, 0755); err != nil {
		t.Fatal(err)
	}
	c := NewBaseContainer(id, cRoot)
	c.HostConfig = &containertypes.HostConfig{}
	return c
}

func TestViewSaveDelete(t *testing.T) {
	db, err := NewViewDB()
	if err != nil {
		t.Fatal(err)
	}
	c := newContainer(t)
	if err := c.CheckpointTo(db); err != nil {
		t.Fatal(err)
	}
	if err := db.Delete(c); err != nil {
		t.Fatal(err)
	}
}

func TestViewAll(t *testing.T) {
	var (
		db, _ = NewViewDB()
		one   = newContainer(t)
		two   = newContainer(t)
	)
	one.Pid = 10
	if err := one.CheckpointTo(db); err != nil {
		t.Fatal(err)
	}
	two.Pid = 20
	if err := two.CheckpointTo(db); err != nil {
		t.Fatal(err)
	}

	all, err := db.Snapshot().All()
	if err != nil {
		t.Fatal(err)
	}
	if l := len(all); l != 2 {
		t.Fatalf("expected 2 items, got %d", l)
	}
	byID := make(map[string]Snapshot)
	for i := range all {
		byID[all[i].ID] = all[i]
	}
	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
		t.Fatalf("expected something different for id=%s: %v", one.ID, s)
	}
	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
		t.Fatalf("expected something different for id=%s: %v", two.ID, s)
	}
}

func TestViewGet(t *testing.T) {
	var (
		db, _ = NewViewDB()
		one   = newContainer(t)
	)
	one.ImageID = "some-image-123"
	if err := one.CheckpointTo(db); err != nil {
		t.Fatal(err)
	}
	s, err := db.Snapshot().Get(one.ID)
	if err != nil {
		t.Fatal(err)
	}
	if s == nil || s.ImageID != "some-image-123" {
		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
	}
}

func TestNames(t *testing.T) {
	db, err := NewViewDB()
	if err != nil {
		t.Fatal(err)
	}
	assert.Check(t, db.ReserveName("name1", "containerid1"))
	assert.Check(t, db.ReserveName("name1", "containerid1")) // idempotent
	assert.Check(t, db.ReserveName("name2", "containerid2"))
	assert.Check(t, is.Error(db.ReserveName("name2", "containerid3"), ErrNameReserved.Error()))

	// Releasing a name allows the name to point to something else later.
	assert.Check(t, db.ReleaseName("name2"))
	assert.Check(t, db.ReserveName("name2", "containerid3"))

	view := db.Snapshot()

	id, err := view.GetID("name1")
	assert.Check(t, err)
	assert.Check(t, is.Equal("containerid1", id))

	id, err = view.GetID("name2")
	assert.Check(t, err)
	assert.Check(t, is.Equal("containerid3", id))

	_, err = view.GetID("notreserved")
	assert.Check(t, is.Error(err, ErrNameNotReserved.Error()))

	// Releasing and re-reserving a name doesn't affect the snapshot.
	assert.Check(t, db.ReleaseName("name2"))
	assert.Check(t, db.ReserveName("name2", "containerid4"))

	id, err = view.GetID("name1")
	assert.Check(t, err)
	assert.Check(t, is.Equal("containerid1", id))

	id, err = view.GetID("name2")
	assert.Check(t, err)
	assert.Check(t, is.Equal("containerid3", id))

	// GetAllNames
	assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames()))

	assert.Check(t, db.ReserveName("name3", "containerid1"))
	assert.Check(t, db.ReserveName("name4", "containerid1"))

	view = db.Snapshot()
	assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames()))

	// Release containerid1's names with Delete even though no container exists.
	assert.Check(t, db.Delete(&Container{ID: "containerid1"}))

	// Reusing one of those names should work.
	assert.Check(t, db.ReserveName("name1", "containerid4"))
	view = db.Snapshot()
	assert.Check(t, is.DeepEqual(map[string][]string{"containerid4": {"name1", "name2"}}, view.GetAllNames()))
}

// Test case for GitHub issue 35920.
func TestViewWithHealthCheck(t *testing.T) {
	var (
		db, _ = NewViewDB()
		one   = newContainer(t)
	)
	one.Health = &Health{
		Health: types.Health{
			Status: "starting",
		},
	}
	if err := one.CheckpointTo(db); err != nil {
		t.Fatal(err)
	}
	s, err := db.Snapshot().Get(one.ID)
	if err != nil {
		t.Fatal(err)
	}
	if s == nil || s.Health != "starting" {
		t.Fatalf("expected Health=starting. Got: %+v", s)
	}
}