add better generate

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-03-20 01:33:56 -04:00
parent 3fc6abf56b
commit cdd93563f5
5655 changed files with 1187011 additions and 392 deletions


@@ -0,0 +1,36 @@
// +build linux
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
aaprofile "github.com/docker/docker/profiles/apparmor"
"github.com/opencontainers/runc/libcontainer/apparmor"
)
// Define constants for native driver
const (
defaultApparmorProfile = "docker-default"
)
func ensureDefaultAppArmorProfile() error {
if apparmor.IsEnabled() {
loaded, err := aaprofile.IsLoaded(defaultApparmorProfile)
if err != nil {
return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err)
}
// Nothing to do.
if loaded {
return nil
}
// Load the profile.
if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil {
return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err)
}
}
return nil
}


@@ -0,0 +1,7 @@
// +build !linux
package daemon // import "github.com/docker/docker/daemon"
func ensureDefaultAppArmorProfile() error {
return nil
}


@@ -0,0 +1,449 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"io"
"os"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
)
// ErrExtractPointNotDirectory is used to convey that the operation to extract
// a tar archive to a directory in a container has failed because the specified
// path does not refer to a directory.
var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
// The daemon will use the following interfaces if the container fs implements
// these for optimized copies to and from the container.
type extractor interface {
ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}
type archiver interface {
ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}
// helper functions to extract or archive
func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive(src, dst, opts)
}
return chrootarchive.Untar(src, dst, opts)
}
func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath(src, opts)
}
return archive.TarWithOptions(src, opts)
}
// ContainerCopy performs a deprecated operation of archiving the resource at
// the specified path in the container identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
// Make sure an online file-system operation is permitted.
if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
return nil, errdefs.System(err)
}
data, err := daemon.containerCopy(container, res)
if err == nil {
return data, nil
}
if os.IsNotExist(err) {
return nil, containerFileNotFound{res, name}
}
return nil, errdefs.System(err)
}
// ContainerStatPath stats the filesystem resource at the specified path in the
// container identified by the given name.
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
// Make sure an online file-system operation is permitted.
if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
return nil, errdefs.System(err)
}
stat, err = daemon.containerStatPath(container, path)
if err == nil {
return stat, nil
}
if os.IsNotExist(err) {
return nil, containerFileNotFound{path, name}
}
return nil, errdefs.System(err)
}
// ContainerArchivePath creates an archive of the filesystem resource at the
// specified path in the container identified by the given name. Returns a
// tar archive of the resource and whether it was a directory or a single file.
func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container, err := daemon.GetContainer(name)
if err != nil {
return nil, nil, err
}
// Make sure an online file-system operation is permitted.
if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
return nil, nil, errdefs.System(err)
}
content, stat, err = daemon.containerArchivePath(container, path)
if err == nil {
return content, stat, nil
}
if os.IsNotExist(err) {
return nil, nil, containerFileNotFound{path, name}
}
return nil, nil, errdefs.System(err)
}
// ContainerExtractToDir extracts the given archive to the specified location
// in the filesystem of the container identified by the given name. The given
// path must be of a directory in the container. If it is not, the error will
// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
// be an error if unpacking the given content would cause an existing directory
// to be replaced with a non-directory and vice versa.
func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error {
container, err := daemon.GetContainer(name)
if err != nil {
return err
}
// Make sure an online file-system operation is permitted.
if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
return errdefs.System(err)
}
err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content)
if err == nil {
return nil
}
if os.IsNotExist(err) {
return containerFileNotFound{path, name}
}
return errdefs.System(err)
}
// containerStatPath stats the filesystem resource at the specified path in this
// container. Returns stat info about the resource.
func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) {
container.Lock()
defer container.Unlock()
if err = daemon.Mount(container); err != nil {
return nil, err
}
defer daemon.Unmount(container)
err = daemon.mountVolumes(container)
defer container.DetachAndUnmount(daemon.LogVolumeEvent)
if err != nil {
return nil, err
}
// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)
resolvedPath, absPath, err := container.ResolvePath(path)
if err != nil {
return nil, err
}
return container.StatPath(resolvedPath, absPath)
}
// containerArchivePath creates an archive of the filesystem resource at the specified
// path in this container. Returns a tar archive of the resource and stat info
// about the resource.
func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container.Lock()
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err = daemon.Mount(container); err != nil {
return nil, nil, err
}
defer func() {
if err != nil {
// unmount any volumes
container.DetachAndUnmount(daemon.LogVolumeEvent)
// unmount the container's rootfs
daemon.Unmount(container)
}
}()
if err = daemon.mountVolumes(container); err != nil {
return nil, nil, err
}
// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)
resolvedPath, absPath, err := container.ResolvePath(path)
if err != nil {
return nil, nil, err
}
stat, err = container.StatPath(resolvedPath, absPath)
if err != nil {
return nil, nil, err
}
// We need to rebase the archive entries if the last element of the
// resolved path was a symlink that was evaluated and is now different
// than the requested path. For example, if the given path was "/foo/bar/",
// but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want
// to ensure that the archive entries start with "bar" and not "baz". This
// also catches the case when the root directory of the container is
// requested: we want the archive entries to start with "/" and not the
// container ID.
driver := container.BaseFS
// Get the source and the base paths of the container resolved path in order
// to get the proper tar options for the rebase tar.
resolvedPath = driver.Clean(resolvedPath)
if driver.Base(resolvedPath) == "." {
resolvedPath += string(driver.Separator()) + "."
}
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
data, err := archivePath(driver, sourceDir, opts)
if err != nil {
return nil, nil, err
}
content = ioutils.NewReadCloserWrapper(data, func() error {
err := data.Close()
container.DetachAndUnmount(daemon.LogVolumeEvent)
daemon.Unmount(container)
container.Unlock()
return err
})
daemon.LogContainerEvent(container, "archive-path")
return content, stat, nil
}
// containerExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) {
container.Lock()
defer container.Unlock()
if err = daemon.Mount(container); err != nil {
return err
}
defer daemon.Unmount(container)
err = daemon.mountVolumes(container)
defer container.DetachAndUnmount(daemon.LogVolumeEvent)
if err != nil {
return err
}
// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)
driver := container.BaseFS
// Check if a drive letter was supplied; if so, it must be the system drive. No-op except on Windows
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver)
if err != nil {
return err
}
// The destination path needs to be resolved to a host path, with all
// symbolic links followed in the scope of the container's rootfs. Note
// that we do not use `container.ResolvePath(path)` here because we need
// to also evaluate the last path element if it is a symlink. This is so
// that you can extract an archive to a symlink that points to a directory.
// Consider the given path as an absolute path in the container.
absPath := archive.PreserveTrailingDotOrSeparator(
driver.Join(string(driver.Separator()), path),
path,
driver.Separator())
// This will evaluate the last path element if it is a symlink.
resolvedPath, err := container.GetResourcePath(absPath)
if err != nil {
return err
}
stat, err := driver.Lstat(resolvedPath)
if err != nil {
return err
}
if !stat.IsDir() {
return ErrExtractPointNotDirectory
}
// Need to check if the path is in a volume. If it is, it cannot be in a
// read-only volume. If it is not in a volume, the container cannot be
// configured with a read-only rootfs.
// Use the resolved path relative to the container rootfs as the new
// absPath. This way we fully follow any symlinks in a volume that may
// lead back outside the volume.
//
// The Windows implementation of filepath.Rel in golang 1.4 does not
// support volume style file path semantics. On Windows when using the
// filter driver, we are guaranteed that the path will always be
// a volume file path.
var baseRel string
if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
if strings.HasPrefix(resolvedPath, driver.Path()) {
baseRel = resolvedPath[len(driver.Path()):]
if baseRel[:1] == `\` {
baseRel = baseRel[1:]
}
}
} else {
baseRel, err = driver.Rel(driver.Path(), resolvedPath)
}
if err != nil {
return err
}
// Make it an absolute path.
absPath = driver.Join(string(driver.Separator()), baseRel)
// @ TODO: gupta-ak: Technically, this works since it no-ops
// on Windows and the file system is local anyway on linux.
// But eventually, it should be made driver aware.
toVolume, err := checkIfPathIsInAVolume(container, absPath)
if err != nil {
return err
}
if !toVolume && container.HostConfig.ReadonlyRootfs {
return ErrRootFSReadOnly
}
options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir)
if copyUIDGID {
var err error
// tarCopyOptions will appropriately pull in the right uid/gid for the
// user/group and will set the options.
options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir)
if err != nil {
return err
}
}
if err := extractArchive(driver, content, resolvedPath, options); err != nil {
return err
}
daemon.LogContainerEvent(container, "extract-to-dir")
return nil
}
func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) {
if resource[0] == '/' || resource[0] == '\\' {
resource = resource[1:]
}
container.Lock()
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err := daemon.Mount(container); err != nil {
return nil, err
}
defer func() {
if err != nil {
// unmount any volumes
container.DetachAndUnmount(daemon.LogVolumeEvent)
// unmount the container's rootfs
daemon.Unmount(container)
}
}()
if err := daemon.mountVolumes(container); err != nil {
return nil, err
}
// Normalize path before sending to rootfs
resource = container.BaseFS.FromSlash(resource)
driver := container.BaseFS
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := driver.Stat(basePath)
if err != nil {
return nil, err
}
var filter []string
if !stat.IsDir() {
d, f := driver.Split(basePath)
basePath = d
filter = []string{f}
} else {
filter = []string{driver.Base(basePath)}
basePath = driver.Dir(basePath)
}
archive, err := archivePath(driver, basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
})
if err != nil {
return nil, err
}
reader := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.DetachAndUnmount(daemon.LogVolumeEvent)
daemon.Unmount(container)
container.Unlock()
return err
})
daemon.LogContainerEvent(container, "copy")
return reader, nil
}
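The extractor and archiver interfaces defined near the top of this file describe an optional fast path: a container filesystem driver that implements them is used directly by extractArchive/archivePath, and anything else falls back to chrootarchive.Untar and archive.TarWithOptions. As a hedged illustration, a hypothetical driver could opt in as sketched below; the remotefs package, remoteFS type, and rpcClient interface are invented for the example and are not part of this commit.

package remotefs // hypothetical package, for illustration only

import (
	"io"

	"github.com/docker/docker/pkg/archive"
)

// rpcClient is an assumed transport with its own tar handling.
type rpcClient interface {
	Extract(src io.Reader, dst string, opts *archive.TarOptions) error
	Archive(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}

// remoteFS is an assumed container filesystem whose data lives behind rpcClient.
type remoteFS struct {
	client rpcClient
}

// ExtractArchive satisfies the daemon's extractor interface, so extractArchive()
// will call it instead of falling back to chrootarchive.Untar.
func (fs *remoteFS) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
	return fs.client.Extract(src, dst, opts)
}

// ArchivePath satisfies the archiver interface, so archivePath() will call it
// instead of archive.TarWithOptions.
func (fs *remoteFS) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
	return fs.client.Archive(src, opts)
}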


@@ -0,0 +1,15 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/pkg/archive"
)
// defaultTarCopyOptions is the setting that is used when unpacking an archive
// for a copy API event.
func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions {
return &archive.TarOptions{
NoOverwriteDirNonDir: noOverwriteDirNonDir,
UIDMaps: daemon.idMappings.UIDs(),
GIDMaps: daemon.idMappings.GIDs(),
}
}


@@ -0,0 +1,25 @@
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
)
func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) {
if container.Config.User == "" {
return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil
}
user, err := idtools.LookupUser(container.Config.User)
if err != nil {
return nil, err
}
return &archive.TarOptions{
NoOverwriteDirNonDir: noOverwriteDirNonDir,
ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid},
}, nil
}


@@ -0,0 +1,10 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
"github.com/docker/docker/pkg/archive"
)
func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) {
return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil
}


@@ -0,0 +1,31 @@
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
"github.com/docker/docker/volume"
)
// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
// cannot be in a read-only volume. If it is not in a volume, the container
// cannot be configured with a read-only rootfs.
func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
var toVolume bool
parser := volume.NewParser(container.OS)
for _, mnt := range container.MountPoints {
if toVolume = parser.HasResource(mnt, absPath); toVolume {
if mnt.RW {
break
}
return false, ErrVolumeReadonly
}
}
return toVolume, nil
}
// isOnlineFSOperationPermitted returns an error if an online filesystem operation
// is not permitted.
func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error {
return nil
}


@@ -0,0 +1,39 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"errors"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
)
// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
// cannot be in a read-only volume. If it is not in a volume, the container
// cannot be configured with a read-only rootfs.
//
// This is a no-op on Windows which does not support read-only volumes, or
// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5
func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
return false, nil
}
// isOnlineFSOperationPermitted returns an error if an online filesystem operation
// is not permitted (such as stat or for copying). Running Hyper-V containers
// cannot have their file-system interrogated from the host as the filter is
// loaded inside the utility VM, not the host.
// IMPORTANT: The container lock must NOT be held when calling this function.
func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error {
if !container.IsRunning() {
return nil
}
// Determine isolation. If not specified in the hostconfig, use daemon default.
actualIsolation := container.HostConfig.Isolation
if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) {
actualIsolation = daemon.defaultIsolation
}
if containertypes.Isolation.IsHyperV(actualIsolation) {
return errors.New("filesystem operations against a running Hyper-V container are not supported")
}
return nil
}


@@ -0,0 +1,187 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"io"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/container"
"github.com/docker/docker/container/stream"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig.
func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error {
keys := []byte{}
var err error
if c.DetachKeys != "" {
keys, err = term.ToBytes(c.DetachKeys)
if err != nil {
return errdefs.InvalidParameter(errors.Errorf("Invalid detach keys (%s) provided", c.DetachKeys))
}
}
container, err := daemon.GetContainer(prefixOrName)
if err != nil {
return err
}
if container.IsPaused() {
err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName)
return errdefs.Conflict(err)
}
if container.IsRestarting() {
err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName)
return errdefs.Conflict(err)
}
cfg := stream.AttachConfig{
UseStdin: c.UseStdin,
UseStdout: c.UseStdout,
UseStderr: c.UseStderr,
TTY: container.Config.Tty,
CloseStdin: container.Config.StdinOnce,
DetachKeys: keys,
}
container.StreamConfig.AttachStreams(&cfg)
inStream, outStream, errStream, err := c.GetStreams()
if err != nil {
return err
}
defer inStream.Close()
if !container.Config.Tty && c.MuxStreams {
errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
}
if cfg.UseStdin {
cfg.Stdin = inStream
}
if cfg.UseStdout {
cfg.Stdout = outStream
}
if cfg.UseStderr {
cfg.Stderr = errStream
}
if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil {
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
}
return nil
}
// ContainerAttachRaw attaches the provided streams to the container's stdio
func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error {
container, err := daemon.GetContainer(prefixOrName)
if err != nil {
return err
}
cfg := stream.AttachConfig{
UseStdin: stdin != nil,
UseStdout: stdout != nil,
UseStderr: stderr != nil,
TTY: container.Config.Tty,
CloseStdin: container.Config.StdinOnce,
}
container.StreamConfig.AttachStreams(&cfg)
close(attached)
if cfg.UseStdin {
cfg.Stdin = stdin
}
if cfg.UseStdout {
cfg.Stdout = stdout
}
if cfg.UseStderr {
cfg.Stderr = stderr
}
return daemon.containerAttach(container, &cfg, false, doStream)
}
func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error {
if logs {
logDriver, logCreated, err := daemon.getLogger(c)
if err != nil {
return err
}
if logCreated {
defer func() {
if err = logDriver.Close(); err != nil {
logrus.Errorf("Error closing logger: %v", err)
}
}()
}
cLog, ok := logDriver.(logger.LogReader)
if !ok {
return logger.ErrReadLogsNotSupported{}
}
logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})
defer logs.Close()
LogLoop:
for {
select {
case msg, ok := <-logs.Msg:
if !ok {
break LogLoop
}
if msg.Source == "stdout" && cfg.Stdout != nil {
cfg.Stdout.Write(msg.Line)
}
if msg.Source == "stderr" && cfg.Stderr != nil {
cfg.Stderr.Write(msg.Line)
}
case err := <-logs.Err:
logrus.Errorf("Error streaming logs: %v", err)
break LogLoop
}
}
}
daemon.LogContainerEvent(c, "attach")
if !doStream {
return nil
}
if cfg.Stdin != nil {
r, w := io.Pipe()
go func(stdin io.ReadCloser) {
defer w.Close()
defer logrus.Debug("Closing buffered stdin pipe")
io.Copy(w, stdin)
}(cfg.Stdin)
cfg.Stdin = r
}
if !c.Config.OpenStdin {
cfg.Stdin = nil
}
if c.Config.StdinOnce && !c.Config.Tty {
// Wait for the container to stop before returning.
waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning)
defer func() {
<-waitChan // Ignore returned exit code.
}()
}
ctx := c.InitAttachContext()
err := <-c.StreamConfig.CopyStreams(ctx, cfg)
if err != nil {
if _, ok := err.(term.EscapeError); ok {
daemon.LogContainerEvent(c, "detach")
} else {
logrus.Errorf("attach failed with error: %v", err)
}
}
return nil
}
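ContainerAttachRaw above takes already-opened streams plus a channel that is closed once the streams are attached. A minimal, hypothetical caller might look like the following; the attachStdout helper and its buffer are assumptions made for illustration only, not code from this commit.

package daemon // illustrative sketch only, not part of this commit

import "bytes"

// attachStdout shows one way ContainerAttachRaw might be driven: no stdin,
// stdout collected into a buffer, and the attached channel used to learn when
// the streams have been wired up.
func attachStdout(d *Daemon, name string) ([]byte, error) {
	var out bytes.Buffer
	attached := make(chan struct{})
	errCh := make(chan error, 1)
	go func() {
		// nil stdin/stderr leave cfg.UseStdin and cfg.UseStderr false,
		// so only stdout is streamed into out.
		errCh <- d.ContainerAttachRaw(name, nil, &out, nil, true, attached)
	}()
	<-attached // closed by ContainerAttachRaw once AttachStreams has run
	err := <-errCh
	return out.Bytes(), err
}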


@@ -0,0 +1,13 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/dockerversion"
)
// AuthenticateToRegistry checks the validity of credentials in authConfig
func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) {
return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx))
}


@@ -0,0 +1,5 @@
// +build linux freebsd
package daemon // import "github.com/docker/docker/daemon"
const bindMountType = "bind"


@@ -0,0 +1,141 @@
// +build !windows
package caps // import "github.com/docker/docker/daemon/caps"
import (
"fmt"
"strings"
"github.com/syndtr/gocapability/capability"
)
var capabilityList Capabilities
func init() {
last := capability.CAP_LAST_CAP
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
if last == capability.Cap(63) {
last = capability.CAP_BLOCK_SUSPEND
}
for _, cap := range capability.List() {
if cap > last {
continue
}
capabilityList = append(capabilityList,
&CapabilityMapping{
Key: "CAP_" + strings.ToUpper(cap.String()),
Value: cap,
},
)
}
}
type (
// CapabilityMapping maps linux capability name to its value of capability.Cap type
// Capabilities is one of the security systems in Linux Security Module (LSM)
// framework provided by the kernel.
// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
CapabilityMapping struct {
Key string `json:"key,omitempty"`
Value capability.Cap `json:"value,omitempty"`
}
// Capabilities contains all CapabilityMapping
Capabilities []*CapabilityMapping
)
// String returns <key> of CapabilityMapping
func (c *CapabilityMapping) String() string {
return c.Key
}
// GetCapability returns CapabilityMapping which contains specific key
func GetCapability(key string) *CapabilityMapping {
for _, capp := range capabilityList {
if capp.Key == key {
cpy := *capp
return &cpy
}
}
return nil
}
// GetAllCapabilities returns all of the capabilities
func GetAllCapabilities() []string {
output := make([]string, len(capabilityList))
for i, capability := range capabilityList {
output[i] = capability.String()
}
return output
}
// inSlice tests whether a string is contained in a slice of strings or not.
// Comparison is case insensitive
func inSlice(slice []string, s string) bool {
for _, ss := range slice {
if strings.ToLower(s) == strings.ToLower(ss) {
return true
}
}
return false
}
// TweakCapabilities can tweak capabilities by adding or dropping capabilities
// relative to the given base capabilities.
func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
var (
newCaps []string
allCaps = GetAllCapabilities()
)
// FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
// Currently they are mixed in here. We should do conversion in one place.
// look for invalid cap in the drop list
for _, cap := range drops {
if strings.ToLower(cap) == "all" {
continue
}
if !inSlice(allCaps, "CAP_"+cap) {
return nil, fmt.Errorf("Unknown capability drop: %q", cap)
}
}
// handle --cap-add=all
if inSlice(adds, "all") {
basics = allCaps
}
if !inSlice(drops, "all") {
for _, cap := range basics {
// skip `all` already handled above
if strings.ToLower(cap) == "all" {
continue
}
// if we don't drop `all`, add back all the non-dropped caps
if !inSlice(drops, cap[4:]) {
newCaps = append(newCaps, strings.ToUpper(cap))
}
}
}
for _, cap := range adds {
// skip `all` already handled above
if strings.ToLower(cap) == "all" {
continue
}
cap = "CAP_" + cap
if !inSlice(allCaps, cap) {
return nil, fmt.Errorf("Unknown capability to add: %q", cap)
}
// add cap if not already in the list
if !inSlice(newCaps, cap) {
newCaps = append(newCaps, strings.ToUpper(cap))
}
}
return newCaps, nil
}
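As the FIXME in TweakCapabilities notes, the base list arrives in OCI form (with the CAP_ prefix, as produced by GetAllCapabilities) while the add/drop lists use the prefix-less Docker form. A small, hedged usage example follows; the input slices are made up to stand in for the engine's defaults and the user's --cap-add/--cap-drop values.

package main // illustrative only

import (
	"fmt"
	"log"

	"github.com/docker/docker/daemon/caps"
)

func main() {
	// Base set in OCI form (CAP_ prefix), adds/drops in Docker form (no prefix),
	// mirroring how --cap-add / --cap-drop values reach the daemon.
	basics := []string{"CAP_CHOWN", "CAP_NET_BIND_SERVICE", "CAP_KILL"}
	adds := []string{"NET_ADMIN"}
	drops := []string{"KILL"}

	newCaps, err := caps.TweakCapabilities(basics, adds, drops)
	if err != nil {
		log.Fatal(err) // unknown capability name in adds or drops
	}
	// Expected: [CAP_CHOWN CAP_NET_BIND_SERVICE CAP_NET_ADMIN]
	fmt.Println(newCaps)
}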


@@ -0,0 +1,34 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"errors"
"runtime"
"time"
"github.com/docker/docker/pkg/archive"
)
// ContainerChanges returns a list of container fs changes
func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
start := time.Now()
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
if runtime.GOOS == "windows" && container.IsRunning() {
return nil, errors.New("Windows does not support diff of a running container")
}
container.Lock()
defer container.Unlock()
if container.RWLayer == nil {
return nil, errors.New("RWLayer of container " + name + " is unexpectedly nil")
}
c, err := container.RWLayer.Changes()
if err != nil {
return nil, err
}
containerActions.WithValues("changes").UpdateSince(start)
return c, nil
}


@@ -0,0 +1,143 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
"github.com/docker/docker/daemon/names"
)
var (
validCheckpointNameChars = names.RestrictedNameChars
validCheckpointNamePattern = names.RestrictedNamePattern
)
// getCheckpointDir verifies the checkpoint directory for the create, remove, and list options, and checks whether the checkpoint already exists
func getCheckpointDir(checkDir, checkpointID, ctrName, ctrID, ctrCheckpointDir string, create bool) (string, error) {
var checkpointDir string
var err2 error
if checkDir != "" {
checkpointDir = checkDir
} else {
checkpointDir = ctrCheckpointDir
}
checkpointAbsDir := filepath.Join(checkpointDir, checkpointID)
stat, err := os.Stat(checkpointAbsDir)
if create {
switch {
case err == nil && stat.IsDir():
err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName)
case err != nil && os.IsNotExist(err):
err2 = os.MkdirAll(checkpointAbsDir, 0700)
case err != nil:
err2 = err
case err == nil:
err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
}
} else {
switch {
case err != nil:
err2 = fmt.Errorf("checkpoint %s does not exists for container %s", checkpointID, ctrName)
case err == nil && stat.IsDir():
err2 = nil
case err == nil:
err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
}
}
return checkpointAbsDir, err2
}
// CheckpointCreate checkpoints the process running in a container with CRIU
func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error {
container, err := daemon.GetContainer(name)
if err != nil {
return err
}
if !container.IsRunning() {
return fmt.Errorf("Container %s not running", name)
}
if container.Config.Tty {
return fmt.Errorf("checkpoint not support on containers with tty")
}
if !validCheckpointNamePattern.MatchString(config.CheckpointID) {
return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars)
}
checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true)
if err != nil {
return fmt.Errorf("cannot checkpoint container %s: %s", name, err)
}
err = daemon.containerd.CreateCheckpoint(context.Background(), container.ID, checkpointDir, config.Exit)
if err != nil {
os.RemoveAll(checkpointDir)
return fmt.Errorf("Cannot checkpoint container %s: %s", name, err)
}
daemon.LogContainerEvent(container, "checkpoint")
return nil
}
// CheckpointDelete deletes the specified checkpoint
func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error {
container, err := daemon.GetContainer(name)
if err != nil {
return err
}
checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), false)
if err == nil {
return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID))
}
return err
}
// CheckpointList lists all checkpoints of the specified container
func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) {
var out []types.Checkpoint
container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false)
if err != nil {
return nil, err
}
if err := os.MkdirAll(checkpointDir, 0755); err != nil {
return nil, err
}
dirs, err := ioutil.ReadDir(checkpointDir)
if err != nil {
return nil, err
}
for _, d := range dirs {
if !d.IsDir() {
continue
}
path := filepath.Join(checkpointDir, d.Name(), "config.json")
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var cpt types.Checkpoint
if err := json.Unmarshal(data, &cpt); err != nil {
return nil, err
}
out = append(out, cpt)
}
return out, nil
}
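CheckpointCreate and CheckpointList above both resolve their directory through getCheckpointDir before calling containerd or reading the per-checkpoint config.json files. The rough sketch below shows how a caller might chain the two; the listAfterCheckpoint helper and the "cp1" checkpoint ID are assumptions made for illustration only.

package daemon // illustrative sketch only, not part of this commit

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// listAfterCheckpoint checkpoints a running container with CRIU, then lists
// the checkpoints stored under its default CheckpointDir.
func listAfterCheckpoint(d *Daemon, name string) error {
	if err := d.CheckpointCreate(name, types.CheckpointCreateOptions{
		CheckpointID: "cp1", // must match names.RestrictedNamePattern
		Exit:         false, // keep the container running after the dump
	}); err != nil {
		return err
	}
	cps, err := d.CheckpointList(name, types.CheckpointListOptions{})
	if err != nil {
		return err
	}
	for _, cp := range cps {
		fmt.Println(cp.Name) // name as unmarshalled from each checkpoint's config.json
	}
	return nil
}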


@@ -0,0 +1,26 @@
package daemon // import "github.com/docker/docker/daemon"
import (
apitypes "github.com/docker/docker/api/types"
lncluster "github.com/docker/libnetwork/cluster"
)
// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster).
type Cluster interface {
ClusterStatus
NetworkManager
SendClusterEvent(event lncluster.ConfigEventType)
}
// ClusterStatus interface provides information about the Swarm status of the Cluster
type ClusterStatus interface {
IsAgent() bool
IsManager() bool
}
// NetworkManager provides methods to manage networks
type NetworkManager interface {
GetNetwork(input string) (apitypes.NetworkResource, error)
GetNetworks() ([]apitypes.NetworkResource, error)
RemoveNetwork(input string) error
}


@@ -0,0 +1,427 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
//
// ## Swarmkit integration
//
// Cluster - static configurable object for accessing everything swarm related.
// Contains methods for connecting and controlling the cluster. Exists always,
// even if swarm mode is not enabled.
//
// NodeRunner - Manager for starting the swarmkit node. It is present if and
// only if swarm mode is enabled, and implements a backoff restart loop in
// case of errors.
//
// NodeState - Information about the current node status including access to
// gRPC clients if a manager is active.
//
// ### Locking
//
// `cluster.controlMutex` - taken for the whole lifecycle of the processes that
// can reconfigure the cluster (init/join/leave, etc.). It ensures that one
// reconfiguration action has fully completed before another can start.
//
// `cluster.mu` - taken when the actual changes in cluster configuration
// happen. It is different from `controlMutex` because in some cases we need to
// access the current cluster state even while a long-running reconfiguration is
// going on; for example, the network stack may ask for the current cluster state
// in the middle of a shutdown. Any time the current cluster state is needed,
// take the read lock of `cluster.mu`. If you are writing an API responder that
// returns synchronously, hold `cluster.mu.RLock()` for the duration of the whole
// handler function. That ensures that the node will not be shut down until the
// handler has finished.
//
// NodeRunner implements its own internal locks that should not be used outside
// of the struct. Instead, just call the `nodeRunner.State()` method to get the
// current state of the cluster (you still need `cluster.mu.RLock()` to access
// the `cluster.nr` reference itself). Most of the changes in NodeRunner happen
// because of an external event (network problem, unexpected swarmkit error), and
// Docker shouldn't take any locks that delay these changes from happening.
//
import (
"fmt"
"net"
"os"
"path/filepath"
"sync"
"time"
"github.com/docker/docker/api/types/network"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/docker/pkg/signal"
lncluster "github.com/docker/libnetwork/cluster"
swarmapi "github.com/docker/swarmkit/api"
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
const swarmDirName = "swarm"
const controlSocket = "control.sock"
const swarmConnectTimeout = 20 * time.Second
const swarmRequestTimeout = 20 * time.Second
const stateFile = "docker-state.json"
const defaultAddr = "0.0.0.0:2377"
const (
initialReconnectDelay = 100 * time.Millisecond
maxReconnectDelay = 30 * time.Second
contextPrefix = "com.docker.swarm"
)
// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
Subnets() ([]net.IPNet, []net.IPNet)
}
// Config provides values for Cluster.
type Config struct {
Root string
Name string
Backend executorpkg.Backend
ImageBackend executorpkg.ImageBackend
PluginBackend plugin.Backend
NetworkSubnetsProvider NetworkSubnetsProvider
// DefaultAdvertiseAddr is the default host/IP or network interface to use
// if no AdvertiseAddr value is specified.
DefaultAdvertiseAddr string
// path to store runtime state, such as the swarm control socket
RuntimeRoot string
// WatchStream is a channel to pass watch API notifications to daemon
WatchStream chan *swarmapi.WatchMessage
}
// Cluster provides capabilities to participate in a cluster as a worker or a
// manager.
type Cluster struct {
mu sync.RWMutex
controlMutex sync.RWMutex // protect init/join/leave user operations
nr *nodeRunner
root string
runtimeRoot string
config Config
configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe
attachers map[string]*attacher
watchStream chan *swarmapi.WatchMessage
}
// attacher manages the in-memory attachment state of a container
// attachment to a global scope network managed by swarm manager. It
// helps in identifying the attachment ID via the taskID and the
// corresponding attachment configuration obtained from the manager.
type attacher struct {
taskID string
config *network.NetworkingConfig
inProgress bool
attachWaitCh chan *network.NetworkingConfig
attachCompleteCh chan struct{}
detachWaitCh chan struct{}
}
// New creates a new Cluster instance using provided config.
func New(config Config) (*Cluster, error) {
root := filepath.Join(config.Root, swarmDirName)
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
if config.RuntimeRoot == "" {
config.RuntimeRoot = root
}
if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
return nil, err
}
c := &Cluster{
root: root,
config: config,
configEvent: make(chan lncluster.ConfigEventType, 10),
runtimeRoot: config.RuntimeRoot,
attachers: make(map[string]*attacher),
watchStream: config.WatchStream,
}
return c, nil
}
// Start the Cluster instance
// TODO The split between New and Start can be joined again when the SendClusterEvent
// method is no longer required.
func (c *Cluster) Start() error {
root := filepath.Join(c.config.Root, swarmDirName)
nodeConfig, err := loadPersistentState(root)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
nr, err := c.newNodeRunner(*nodeConfig)
if err != nil {
return err
}
c.nr = nr
select {
case <-time.After(swarmConnectTimeout):
logrus.Error("swarm component could not be started before timeout was reached")
case err := <-nr.Ready():
if err != nil {
logrus.WithError(err).Error("swarm component could not be started")
return nil
}
}
return nil
}
func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) {
if err := c.config.Backend.IsSwarmCompatible(); err != nil {
return nil, err
}
actualLocalAddr := conf.LocalAddr
if actualLocalAddr == "" {
// If localAddr was not specified, resolve it automatically
// based on the route to joinAddr. localAddr can only be left
// empty on "join".
listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
if err != nil {
return nil, fmt.Errorf("could not parse listen address: %v", err)
}
listenAddrIP := net.ParseIP(listenHost)
if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
actualLocalAddr = listenHost
} else {
if conf.RemoteAddr == "" {
// Should never happen except using swarms created by
// old versions that didn't save remoteAddr.
conf.RemoteAddr = "8.8.8.8:53"
}
conn, err := net.Dial("udp", conf.RemoteAddr)
if err != nil {
return nil, fmt.Errorf("could not find local IP address: %v", err)
}
localHostPort := conn.LocalAddr().String()
actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
conn.Close()
}
}
nr := &nodeRunner{cluster: c}
nr.actualLocalAddr = actualLocalAddr
if err := nr.Start(conf); err != nil {
return nil, err
}
c.config.Backend.DaemonJoinsCluster(c)
return nr, nil
}
func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on quorum lost
return context.WithTimeout(context.Background(), swarmRequestTimeout)
}
// IsManager returns true if Cluster is participating as a manager.
func (c *Cluster) IsManager() bool {
c.mu.RLock()
defer c.mu.RUnlock()
return c.currentNodeState().IsActiveManager()
}
// IsAgent returns true if Cluster is participating as a worker/agent.
func (c *Cluster) IsAgent() bool {
c.mu.RLock()
defer c.mu.RUnlock()
return c.currentNodeState().status == types.LocalNodeStateActive
}
// GetLocalAddress returns the local address.
func (c *Cluster) GetLocalAddress() string {
c.mu.RLock()
defer c.mu.RUnlock()
return c.currentNodeState().actualLocalAddr
}
// GetListenAddress returns the listen address.
func (c *Cluster) GetListenAddress() string {
c.mu.RLock()
defer c.mu.RUnlock()
if c.nr != nil {
return c.nr.config.ListenAddr
}
return ""
}
// GetAdvertiseAddress returns the remotely reachable address of this node.
func (c *Cluster) GetAdvertiseAddress() string {
c.mu.RLock()
defer c.mu.RUnlock()
if c.nr != nil && c.nr.config.AdvertiseAddr != "" {
advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr)
return advertiseHost
}
return c.currentNodeState().actualLocalAddr
}
// GetDataPathAddress returns the address to be used for the data path traffic, if specified.
func (c *Cluster) GetDataPathAddress() string {
c.mu.RLock()
defer c.mu.RUnlock()
if c.nr != nil {
return c.nr.config.DataPathAddr
}
return ""
}
// GetRemoteAddressList returns the advertise address for each of the remote managers if
// available.
func (c *Cluster) GetRemoteAddressList() []string {
c.mu.RLock()
defer c.mu.RUnlock()
return c.getRemoteAddressList()
}
func (c *Cluster) getRemoteAddressList() []string {
state := c.currentNodeState()
if state.swarmNode == nil {
return []string{}
}
nodeID := state.swarmNode.NodeID()
remotes := state.swarmNode.Remotes()
addressList := make([]string, 0, len(remotes))
for _, r := range remotes {
if r.NodeID != nodeID {
addressList = append(addressList, r.Addr)
}
}
return addressList
}
// ListenClusterEvents returns a channel that receives messages on cluster
// participation changes.
// todo: make cancelable and accessible to multiple callers
func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType {
return c.configEvent
}
// currentNodeState should not be called without a read lock
func (c *Cluster) currentNodeState() nodeState {
return c.nr.State()
}
// errNoManager returns error describing why manager commands can't be used.
// Call with read lock.
func (c *Cluster) errNoManager(st nodeState) error {
if st.swarmNode == nil {
if errors.Cause(st.err) == errSwarmLocked {
return errSwarmLocked
}
if st.err == errSwarmCertificatesExpired {
return errSwarmCertificatesExpired
}
return errors.WithStack(notAvailableError("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again."))
}
if st.swarmNode.Manager() != nil {
return errors.WithStack(notAvailableError("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster."))
}
return errors.WithStack(notAvailableError("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager."))
}
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
c.controlMutex.Lock()
defer c.controlMutex.Unlock()
c.mu.Lock()
node := c.nr
if node == nil {
c.mu.Unlock()
return
}
state := c.currentNodeState()
c.mu.Unlock()
if state.IsActiveManager() {
active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
if err == nil {
singlenode := active && isLastManager(reachable, unreachable)
if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
}
}
}
if err := node.Stop(); err != nil {
logrus.Errorf("failed to shut down cluster node: %v", err)
signal.DumpStacks("")
}
c.mu.Lock()
c.nr = nil
c.mu.Unlock()
}
func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
if err != nil {
return false, 0, 0, err
}
for _, n := range nodes.Nodes {
if n.ManagerStatus != nil {
if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
reachable++
if n.ID == currentNodeID {
current = true
}
}
if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
unreachable++
}
}
}
return
}
func detectLockedError(err error) error {
if err == swarmnode.ErrInvalidUnlockKey {
return errors.WithStack(errSwarmLocked)
}
return err
}
func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return c.errNoManager(state)
}
ctx, cancel := c.getRequestContext()
defer cancel()
return fn(ctx, state)
}
// SendClusterEvent allows sending cluster events on the configEvent channel
// TODO This method should not be exposed.
// Currently it is used to notify the network controller that the keys are
// available
func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) {
c.mu.RLock()
defer c.mu.RUnlock()
c.configEvent <- event
}
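The locking rules in the package comment translate into a simple pattern for synchronous, read-only responders: hold cluster.mu's read lock for the whole call and go through currentNodeState(). The sketch below illustrates that pattern; GetNodeAddresses is made up for the example and is not part of this commit.

package cluster // illustrative sketch only, not part of this commit

// GetNodeAddresses is an assumed read-only responder illustrating the
// convention from the package comment: hold cluster.mu's read lock for the
// duration of the handler so the node cannot be shut down underneath it, and
// always go through currentNodeState() rather than touching nodeRunner
// internals directly.
func (c *Cluster) GetNodeAddresses() (local string, remotes []string) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	return state.actualLocalAddr, c.getRemoteAddressList()
}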


@@ -0,0 +1,117 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
apitypes "github.com/docker/docker/api/types"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
// GetConfig returns a config from a managed swarm cluster
func (c *Cluster) GetConfig(input string) (types.Config, error) {
var config *swarmapi.Config
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
s, err := getConfig(ctx, state.controlClient, input)
if err != nil {
return err
}
config = s
return nil
}); err != nil {
return types.Config{}, err
}
return convert.ConfigFromGRPC(config), nil
}
// GetConfigs returns all configs of a managed swarm cluster.
func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
filters, err := newListConfigsFilters(options.Filters)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := state.controlClient.ListConfigs(ctx,
&swarmapi.ListConfigsRequest{Filters: filters})
if err != nil {
return nil, err
}
configs := []types.Config{}
for _, config := range r.Configs {
configs = append(configs, convert.ConfigFromGRPC(config))
}
return configs, nil
}
// CreateConfig creates a new config in a managed swarm cluster.
func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) {
var resp *swarmapi.CreateConfigResponse
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
configSpec := convert.ConfigSpecToGRPC(s)
r, err := state.controlClient.CreateConfig(ctx,
&swarmapi.CreateConfigRequest{Spec: &configSpec})
if err != nil {
return err
}
resp = r
return nil
}); err != nil {
return "", err
}
return resp.Config.ID, nil
}
// RemoveConfig removes a config from a managed swarm cluster.
func (c *Cluster) RemoveConfig(input string) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
config, err := getConfig(ctx, state.controlClient, input)
if err != nil {
return err
}
req := &swarmapi.RemoveConfigRequest{
ConfigID: config.ID,
}
_, err = state.controlClient.RemoveConfig(ctx, req)
return err
})
}
// UpdateConfig updates a config in a managed swarm cluster.
// Note: this is not exposed to the CLI but is available from the API only
func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
config, err := getConfig(ctx, state.controlClient, input)
if err != nil {
return err
}
configSpec := convert.ConfigSpecToGRPC(spec)
_, err = state.controlClient.UpdateConfig(ctx,
&swarmapi.UpdateConfigRequest{
ConfigID: config.ID,
ConfigVersion: &swarmapi.Version{
Index: version,
},
Spec: &configSpec,
})
return err
})
}


@@ -0,0 +1,261 @@
package plugin // import "github.com/docker/docker/daemon/cluster/controllers/plugin"
import (
"io"
"io/ioutil"
"net/http"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/plugin"
"github.com/docker/docker/plugin/v2"
"github.com/docker/swarmkit/api"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// Controller is the controller for the plugin backend.
// Plugins are managed as a singleton object with a desired state (different from containers).
// With the plugin controller, instead of having a strict create->start->stop->remove
// task lifecycle like containers, we manage the desired state of the plugin and let
// the plugin manager do what it already does and monitor the plugin.
// We'll also end up with many tasks all pointing to the same plugin ID.
//
// TODO(@cpuguy83): registry auth is intentionally not supported until we work out
// the right way to pass registry credentials via secrets.
type Controller struct {
backend Backend
spec runtime.PluginSpec
logger *logrus.Entry
pluginID string
serviceID string
taskID string
// hook used to signal tests that `Wait()` is actually ready and waiting
signalWaitReady func()
}
// Backend is the interface for interacting with the plugin manager
// Controller actions are passed to the configured backend to do the real work.
type Backend interface {
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
Remove(name string, config *enginetypes.PluginRmConfig) error
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Get(name string) (*v2.Plugin, error)
SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func())
}
// NewController returns a new cluster plugin controller
func NewController(backend Backend, t *api.Task) (*Controller, error) {
spec, err := readSpec(t)
if err != nil {
return nil, err
}
return &Controller{
backend: backend,
spec: spec,
serviceID: t.ServiceID,
logger: logrus.WithFields(logrus.Fields{
"controller": "plugin",
"task": t.ID,
"plugin": spec.Name,
})}, nil
}
func readSpec(t *api.Task) (runtime.PluginSpec, error) {
var cfg runtime.PluginSpec
generic := t.Spec.GetGeneric()
if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil {
return cfg, errors.Wrap(err, "error reading plugin spec")
}
return cfg, nil
}
// Update is the update phase from swarmkit
func (p *Controller) Update(ctx context.Context, t *api.Task) error {
p.logger.Debug("Update")
return nil
}
// Prepare is the prepare phase from swarmkit
func (p *Controller) Prepare(ctx context.Context) (err error) {
p.logger.Debug("Prepare")
remote, err := reference.ParseNormalizedNamed(p.spec.Remote)
if err != nil {
return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote)
}
if p.spec.Name == "" {
p.spec.Name = remote.String()
}
var authConfig enginetypes.AuthConfig
privs := convertPrivileges(p.spec.Privileges)
pl, err := p.backend.Get(p.spec.Name)
defer func() {
if pl != nil && err == nil {
pl.Acquire()
}
}()
if err == nil && pl != nil {
if pl.SwarmServiceID != p.serviceID {
return errors.Errorf("plugin already exists: %s", p.spec.Name)
}
if pl.IsEnabled() {
if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil {
p.logger.WithError(err).Debug("could not disable plugin before running upgrade")
}
}
p.pluginID = pl.GetID()
return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard)
}
if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil {
return err
}
pl, err = p.backend.Get(p.spec.Name)
if err != nil {
return err
}
p.pluginID = pl.GetID()
return nil
}
// Start is the start phase from swarmkit
func (p *Controller) Start(ctx context.Context) error {
p.logger.Debug("Start")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
return err
}
if p.spec.Disabled {
if pl.IsEnabled() {
return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false})
}
return nil
}
if !pl.IsEnabled() {
return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 30})
}
return nil
}
// Wait causes the task to wait until returned
func (p *Controller) Wait(ctx context.Context) error {
p.logger.Debug("Wait")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
return err
}
events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj})
defer cancel()
if p.signalWaitReady != nil {
p.signalWaitReady()
}
if !p.spec.Disabled != pl.IsEnabled() {
return errors.New("mismatched plugin state")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case e := <-events:
p.logger.Debugf("got event %#T", e)
switch e.(type) {
case plugin.EventEnable:
if p.spec.Disabled {
return errors.New("plugin enabled")
}
case plugin.EventRemove:
return errors.New("plugin removed")
case plugin.EventDisable:
if !p.spec.Disabled {
return errors.New("plugin disabled")
}
}
}
}
}
func isNotFound(err error) bool {
return errdefs.IsNotFound(err)
}
// Shutdown is the shutdown phase from swarmkit
func (p *Controller) Shutdown(ctx context.Context) error {
p.logger.Debug("Shutdown")
return nil
}
// Terminate is the terminate phase from swarmkit
func (p *Controller) Terminate(ctx context.Context) error {
p.logger.Debug("Terminate")
return nil
}
// Remove is the remove phase from swarmkit
func (p *Controller) Remove(ctx context.Context) error {
p.logger.Debug("Remove")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
if isNotFound(err) {
return nil
}
return err
}
pl.Release()
if pl.GetRefCount() > 0 {
p.logger.Debug("skipping remove due to ref count")
return nil
}
// This may error because we have exactly 1 plugin, but potentially multiple
// tasks which are calling remove.
err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true})
if isNotFound(err) {
return nil
}
return err
}
// Close is the close phase from swarmkit
func (p *Controller) Close() error {
p.logger.Debug("Close")
return nil
}
func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges {
var out enginetypes.PluginPrivileges
for _, p := range ls {
pp := enginetypes.PluginPrivilege{
Name: p.Name,
Description: p.Description,
Value: p.Value,
}
out = append(out, pp)
}
return out
}
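The Controller above is driven through swarmkit's usual task phases even though a plugin is a singleton rather than a per-task container. The following sketch shows one plausible ordering of those phases; driveOnce and its error handling are assumptions for illustration, not swarmkit's actual executor code.

package plugin // illustrative sketch only, not part of this commit

import (
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

// driveOnce runs a single reconciliation pass over a Controller built with
// NewController. Swarmkit's real executor adds retries and status reporting;
// this only shows the phase ordering.
func driveOnce(ctx context.Context, ctrl *Controller) error {
	if err := ctrl.Prepare(ctx); err != nil { // pull or upgrade the plugin
		return err
	}
	if err := ctrl.Start(ctx); err != nil { // enable/disable to match the spec
		return err
	}
	// Wait blocks until the plugin stops matching the desired state (removed,
	// or toggled out of band) or the context is cancelled; either way we fall
	// through to Shutdown so the task can be restarted or torn down.
	if err := ctrl.Wait(ctx); err != nil {
		logrus.WithError(err).Debug("plugin task stopped waiting")
	}
	return ctrl.Shutdown(ctx)
}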


@@ -0,0 +1,390 @@
package plugin // import "github.com/docker/docker/daemon/cluster/controllers/plugin"
import (
"errors"
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/pkg/pubsub"
"github.com/docker/docker/plugin"
"github.com/docker/docker/plugin/v2"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
const (
pluginTestName = "test"
pluginTestRemote = "testremote"
pluginTestRemoteUpgrade = "testremote2"
)
func TestPrepare(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("pull not performed")
}
c = newTestController(b, false)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("unexpected nil")
}
if b.p.PluginObj.PluginReference != pluginTestRemoteUpgrade {
t.Fatal("upgrade not performed")
}
c = newTestController(b, false)
c.serviceID = "1"
if err := c.Prepare(ctx); err == nil {
t.Fatal("expected error on prepare")
}
}
func TestStart(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if !b.p.IsEnabled() {
t.Fatal("expected plugin to be enabled")
}
c = newTestController(b, true)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if b.p.IsEnabled() {
t.Fatal("expected plugin to be disabled")
}
c = newTestController(b, false)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if !b.p.IsEnabled() {
t.Fatal("expected plugin to be enabled")
}
}
func TestWaitCancel(t *testing.T) {
b := newMockBackend()
c := newTestController(b, true)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
ctxCancel, cancel := context.WithCancel(ctx)
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctxCancel)
}()
cancel()
select {
case err := <-chErr:
if err != context.Canceled {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for cancelation")
}
}
func TestWaitDisabled(t *testing.T) {
b := newMockBackend()
c := newTestController(b, true)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctx)
}()
if err := b.Enable("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
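// Wait signals readiness via signalWaitReady once it is subscribed; the context below lets the test block until that happens.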
ctxWaitReady, cancelCtxWaitReady := context.WithTimeout(ctx, 30*time.Second)
c.signalWaitReady = cancelCtxWaitReady
defer cancelCtxWaitReady()
go func() {
chErr <- c.Wait(ctx)
}()
chEvent, cancel := b.SubscribeEvents(1)
defer cancel()
if err := b.Disable("test", nil); err != nil {
t.Fatal(err)
}
select {
case <-chEvent:
<-ctxWaitReady.Done()
if err := ctxWaitReady.Err(); err == context.DeadlineExceeded {
t.Fatal(err)
}
select {
case <-chErr:
t.Fatal("wait returned unexpectedly")
default:
// all good
}
case <-chErr:
t.Fatal("wait returned unexpectedly")
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := b.Remove("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
if !strings.Contains(err.Error(), "removed") {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
}
func TestWaitEnabled(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctx)
}()
if err := b.Disable("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
ctxWaitReady, ctxWaitCancel := context.WithCancel(ctx)
c.signalWaitReady = ctxWaitCancel
defer ctxWaitCancel()
go func() {
chErr <- c.Wait(ctx)
}()
chEvent, cancel := b.SubscribeEvents(1)
defer cancel()
if err := b.Enable("test", nil); err != nil {
t.Fatal(err)
}
select {
case <-chEvent:
<-ctxWaitReady.Done()
if err := ctxWaitReady.Err(); err == context.DeadlineExceeded {
t.Fatal(err)
}
select {
case <-chErr:
t.Fatal("wait returned unexpectedly")
default:
// all good
}
case <-chErr:
t.Fatal("wait returned unexpectedly")
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := b.Remove("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
if !strings.Contains(err.Error(), "removed") {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
}
func TestRemove(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Shutdown(ctx); err != nil {
t.Fatal(err)
}
c2 := newTestController(b, false)
if err := c2.Prepare(ctx); err != nil {
t.Fatal(err)
}
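// c2 still holds a reference to the plugin, so removing c must not delete it yet.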
if err := c.Remove(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("plugin removed unexpectedly")
}
if err := c2.Shutdown(ctx); err != nil {
t.Fatal(err)
}
if err := c2.Remove(ctx); err != nil {
t.Fatal(err)
}
if b.p != nil {
t.Fatal("expected plugin to be removed")
}
}
func newTestController(b Backend, disabled bool) *Controller {
return &Controller{
logger: &logrus.Entry{Logger: &logrus.Logger{Out: ioutil.Discard}},
backend: b,
spec: runtime.PluginSpec{
Name: pluginTestName,
Remote: pluginTestRemote,
Disabled: disabled,
},
}
}
func newMockBackend() *mockBackend {
return &mockBackend{
pub: pubsub.NewPublisher(0, 0),
}
}
type mockBackend struct {
p *v2.Plugin
pub *pubsub.Publisher
}
func (m *mockBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error {
m.p.PluginObj.Enabled = false
m.pub.Publish(plugin.EventDisable{})
return nil
}
func (m *mockBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error {
m.p.PluginObj.Enabled = true
m.pub.Publish(plugin.EventEnable{})
return nil
}
func (m *mockBackend) Remove(name string, config *enginetypes.PluginRmConfig) error {
m.p = nil
m.pub.Publish(plugin.EventRemove{})
return nil
}
func (m *mockBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error {
m.p = &v2.Plugin{
PluginObj: enginetypes.Plugin{
ID: "1234",
Name: name,
PluginReference: ref.String(),
},
}
return nil
}
func (m *mockBackend) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error {
m.p.PluginObj.PluginReference = pluginTestRemoteUpgrade
return nil
}
func (m *mockBackend) Get(name string) (*v2.Plugin, error) {
if m.p == nil {
return nil, errors.New("not found")
}
return m.p, nil
}
func (m *mockBackend) SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) {
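// The mock ignores the requested event filter and fans out every published event.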
ch := m.pub.SubscribeTopicWithBuffer(nil, buffer)
cancel = func() { m.pub.Evict(ch) }
return ch, cancel
}

View file

@ -0,0 +1,78 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
swarmtypes "github.com/docker/docker/api/types/swarm"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
// ConfigFromGRPC converts a grpc Config to a Config.
func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config {
config := swarmtypes.Config{
ID: s.ID,
Spec: swarmtypes.ConfigSpec{
Annotations: annotationsFromGRPC(s.Spec.Annotations),
Data: s.Spec.Data,
},
}
config.Version.Index = s.Meta.Version.Index
// Meta
config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
if s.Spec.Templating != nil {
config.Spec.Templating = &types.Driver{
Name: s.Spec.Templating.Name,
Options: s.Spec.Templating.Options,
}
}
return config
}
// ConfigSpecToGRPC converts Config to a grpc Config.
func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec {
spec := swarmapi.ConfigSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
Data: s.Data,
}
if s.Templating != nil {
spec.Templating = &swarmapi.Driver{
Name: s.Templating.Name,
Options: s.Templating.Options,
}
}
return spec
}
// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference
func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference {
refs := []*swarmtypes.ConfigReference{}
for _, r := range s {
ref := &swarmtypes.ConfigReference{
ConfigID: r.ConfigID,
ConfigName: r.ConfigName,
}
if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok {
ref.File = &swarmtypes.ConfigReferenceFileTarget{
Name: t.File.Name,
UID: t.File.UID,
GID: t.File.GID,
Mode: t.File.Mode,
}
}
refs = append(refs, ref)
}
return refs
}

View file

@ -0,0 +1,381 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"errors"
"fmt"
"strings"
container "github.com/docker/docker/api/types/container"
mounttypes "github.com/docker/docker/api/types/mount"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
"github.com/sirupsen/logrus"
)
func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {
if c == nil {
return nil
}
containerSpec := &types.ContainerSpec{
Image: c.Image,
Labels: c.Labels,
Command: c.Command,
Args: c.Args,
Hostname: c.Hostname,
Env: c.Env,
Dir: c.Dir,
User: c.User,
Groups: c.Groups,
StopSignal: c.StopSignal,
TTY: c.TTY,
OpenStdin: c.OpenStdin,
ReadOnly: c.ReadOnly,
Hosts: c.Hosts,
Secrets: secretReferencesFromGRPC(c.Secrets),
Configs: configReferencesFromGRPC(c.Configs),
Isolation: IsolationFromGRPC(c.Isolation),
}
if c.DNSConfig != nil {
containerSpec.DNSConfig = &types.DNSConfig{
Nameservers: c.DNSConfig.Nameservers,
Search: c.DNSConfig.Search,
Options: c.DNSConfig.Options,
}
}
// Privileges
if c.Privileges != nil {
containerSpec.Privileges = &types.Privileges{}
if c.Privileges.CredentialSpec != nil {
containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{}
switch c.Privileges.CredentialSpec.Source.(type) {
case *swarmapi.Privileges_CredentialSpec_File:
containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile()
case *swarmapi.Privileges_CredentialSpec_Registry:
containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry()
}
}
if c.Privileges.SELinuxContext != nil {
containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{
Disable: c.Privileges.SELinuxContext.Disable,
User: c.Privileges.SELinuxContext.User,
Type: c.Privileges.SELinuxContext.Type,
Role: c.Privileges.SELinuxContext.Role,
Level: c.Privileges.SELinuxContext.Level,
}
}
}
// Mounts
for _, m := range c.Mounts {
mount := mounttypes.Mount{
Target: m.Target,
Source: m.Source,
Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
ReadOnly: m.ReadOnly,
}
if m.BindOptions != nil {
mount.BindOptions = &mounttypes.BindOptions{
Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])),
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &mounttypes.VolumeOptions{
NoCopy: m.VolumeOptions.NoCopy,
Labels: m.VolumeOptions.Labels,
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
Options: m.VolumeOptions.DriverConfig.Options,
}
}
}
if m.TmpfsOptions != nil {
mount.TmpfsOptions = &mounttypes.TmpfsOptions{
SizeBytes: m.TmpfsOptions.SizeBytes,
Mode: m.TmpfsOptions.Mode,
}
}
containerSpec.Mounts = append(containerSpec.Mounts, mount)
}
if c.StopGracePeriod != nil {
grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod)
containerSpec.StopGracePeriod = &grace
}
if c.Healthcheck != nil {
containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck)
}
return containerSpec
}
func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference {
refs := make([]*swarmapi.SecretReference, 0, len(sr))
for _, s := range sr {
ref := &swarmapi.SecretReference{
SecretID: s.SecretID,
SecretName: s.SecretName,
}
if s.File != nil {
ref.Target = &swarmapi.SecretReference_File{
File: &swarmapi.FileTarget{
Name: s.File.Name,
UID: s.File.UID,
GID: s.File.GID,
Mode: s.File.Mode,
},
}
}
refs = append(refs, ref)
}
return refs
}
func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference {
refs := make([]*types.SecretReference, 0, len(sr))
for _, s := range sr {
target := s.GetFile()
if target == nil {
// not a file target
logrus.Warnf("secret target not a file: secret=%s", s.SecretID)
continue
}
refs = append(refs, &types.SecretReference{
File: &types.SecretReferenceFileTarget{
Name: target.Name,
UID: target.UID,
GID: target.GID,
Mode: target.Mode,
},
SecretID: s.SecretID,
SecretName: s.SecretName,
})
}
return refs
}
func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference {
refs := make([]*swarmapi.ConfigReference, 0, len(sr))
for _, s := range sr {
ref := &swarmapi.ConfigReference{
ConfigID: s.ConfigID,
ConfigName: s.ConfigName,
}
if s.File != nil {
ref.Target = &swarmapi.ConfigReference_File{
File: &swarmapi.FileTarget{
Name: s.File.Name,
UID: s.File.UID,
GID: s.File.GID,
Mode: s.File.Mode,
},
}
}
refs = append(refs, ref)
}
return refs
}
func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference {
refs := make([]*types.ConfigReference, 0, len(sr))
for _, s := range sr {
target := s.GetFile()
if target == nil {
// not a file target
logrus.Warnf("config target not a file: config=%s", s.ConfigID)
continue
}
refs = append(refs, &types.ConfigReference{
File: &types.ConfigReferenceFileTarget{
Name: target.Name,
UID: target.UID,
GID: target.GID,
Mode: target.Mode,
},
ConfigID: s.ConfigID,
ConfigName: s.ConfigName,
})
}
return refs
}
func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
containerSpec := &swarmapi.ContainerSpec{
Image: c.Image,
Labels: c.Labels,
Command: c.Command,
Args: c.Args,
Hostname: c.Hostname,
Env: c.Env,
Dir: c.Dir,
User: c.User,
Groups: c.Groups,
StopSignal: c.StopSignal,
TTY: c.TTY,
OpenStdin: c.OpenStdin,
ReadOnly: c.ReadOnly,
Hosts: c.Hosts,
Secrets: secretReferencesToGRPC(c.Secrets),
Configs: configReferencesToGRPC(c.Configs),
Isolation: isolationToGRPC(c.Isolation),
}
if c.DNSConfig != nil {
containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{
Nameservers: c.DNSConfig.Nameservers,
Search: c.DNSConfig.Search,
Options: c.DNSConfig.Options,
}
}
if c.StopGracePeriod != nil {
containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod)
}
// Privileges
if c.Privileges != nil {
containerSpec.Privileges = &swarmapi.Privileges{}
if c.Privileges.CredentialSpec != nil {
containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{}
if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" {
return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs")
}
if c.Privileges.CredentialSpec.File != "" {
containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{
File: c.Privileges.CredentialSpec.File,
}
} else if c.Privileges.CredentialSpec.Registry != "" {
containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{
Registry: c.Privileges.CredentialSpec.Registry,
}
} else {
return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec")
}
}
if c.Privileges.SELinuxContext != nil {
containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{
Disable: c.Privileges.SELinuxContext.Disable,
User: c.Privileges.SELinuxContext.User,
Type: c.Privileges.SELinuxContext.Type,
Role: c.Privileges.SELinuxContext.Role,
Level: c.Privileges.SELinuxContext.Level,
}
}
}
// Mounts
for _, m := range c.Mounts {
mount := swarmapi.Mount{
Target: m.Target,
Source: m.Source,
ReadOnly: m.ReadOnly,
}
if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
mount.Type = swarmapi.Mount_MountType(mountType)
} else if string(m.Type) != "" {
return nil, fmt.Errorf("invalid MountType: %q", m.Type)
}
if m.BindOptions != nil {
if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
} else if string(m.BindOptions.Propagation) != "" {
return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
NoCopy: m.VolumeOptions.NoCopy,
Labels: m.VolumeOptions.Labels,
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
Options: m.VolumeOptions.DriverConfig.Options,
}
}
}
if m.TmpfsOptions != nil {
mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{
SizeBytes: m.TmpfsOptions.SizeBytes,
Mode: m.TmpfsOptions.Mode,
}
}
containerSpec.Mounts = append(containerSpec.Mounts, mount)
}
if c.Healthcheck != nil {
containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck)
}
return containerSpec, nil
}
func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig {
interval, _ := gogotypes.DurationFromProto(h.Interval)
timeout, _ := gogotypes.DurationFromProto(h.Timeout)
startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod)
return &container.HealthConfig{
Test: h.Test,
Interval: interval,
Timeout: timeout,
Retries: int(h.Retries),
StartPeriod: startPeriod,
}
}
func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig {
return &swarmapi.HealthConfig{
Test: h.Test,
Interval: gogotypes.DurationProto(h.Interval),
Timeout: gogotypes.DurationProto(h.Timeout),
Retries: int32(h.Retries),
StartPeriod: gogotypes.DurationProto(h.StartPeriod),
}
}
// IsolationFromGRPC converts a swarm api container isolation to a moby isolation representation
func IsolationFromGRPC(i swarmapi.ContainerSpec_Isolation) container.Isolation {
switch i {
case swarmapi.ContainerIsolationHyperV:
return container.IsolationHyperV
case swarmapi.ContainerIsolationProcess:
return container.IsolationProcess
case swarmapi.ContainerIsolationDefault:
return container.IsolationDefault
}
return container.IsolationEmpty
}
func isolationToGRPC(i container.Isolation) swarmapi.ContainerSpec_Isolation {
if i.IsHyperV() {
return swarmapi.ContainerIsolationHyperV
}
if i.IsProcess() {
return swarmapi.ContainerIsolationProcess
}
return swarmapi.ContainerIsolationDefault
}

View file

@ -0,0 +1,240 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"strings"
basictypes "github.com/docker/docker/api/types"
networktypes "github.com/docker/docker/api/types/network"
types "github.com/docker/docker/api/types/swarm"
netconst "github.com/docker/libnetwork/datastore"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
if na != nil {
return types.NetworkAttachment{
Network: networkFromGRPC(na.Network),
Addresses: na.Addresses,
}
}
return types.NetworkAttachment{}
}
func networkFromGRPC(n *swarmapi.Network) types.Network {
if n != nil {
network := types.Network{
ID: n.ID,
Spec: types.NetworkSpec{
IPv6Enabled: n.Spec.Ipv6Enabled,
Internal: n.Spec.Internal,
Attachable: n.Spec.Attachable,
Ingress: IsIngressNetwork(n),
IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
Scope: netconst.SwarmScope,
},
IPAMOptions: ipamFromGRPC(n.IPAM),
}
if n.Spec.GetNetwork() != "" {
network.Spec.ConfigFrom = &networktypes.ConfigReference{
Network: n.Spec.GetNetwork(),
}
}
// Meta
network.Version.Index = n.Meta.Version.Index
network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
// Annotations
network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
// DriverConfiguration
if n.Spec.DriverConfig != nil {
network.Spec.DriverConfiguration = &types.Driver{
Name: n.Spec.DriverConfig.Name,
Options: n.Spec.DriverConfig.Options,
}
}
// DriverState
if n.DriverState != nil {
network.DriverState = types.Driver{
Name: n.DriverState.Name,
Options: n.DriverState.Options,
}
}
return network
}
return types.Network{}
}
func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions {
var ipam *types.IPAMOptions
if i != nil {
ipam = &types.IPAMOptions{}
if i.Driver != nil {
ipam.Driver.Name = i.Driver.Name
ipam.Driver.Options = i.Driver.Options
}
for _, config := range i.Configs {
ipam.Configs = append(ipam.Configs, types.IPAMConfig{
Subnet: config.Subnet,
Range: config.Range,
Gateway: config.Gateway,
})
}
}
return ipam
}
func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
var endpointSpec *types.EndpointSpec
if es != nil {
endpointSpec = &types.EndpointSpec{}
endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))
for _, portState := range es.Ports {
endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState))
}
}
return endpointSpec
}
func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
endpoint := types.Endpoint{}
if e != nil {
if espec := endpointSpecFromGRPC(e.Spec); espec != nil {
endpoint.Spec = *espec
}
for _, portState := range e.Ports {
endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState))
}
for _, v := range e.VirtualIPs {
endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{
NetworkID: v.NetworkID,
Addr: v.Addr})
}
}
return endpoint
}
func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig {
return types.PortConfig{
Name: portConfig.Name,
Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])),
PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
}
}
// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
spec := n.Spec
var ipam networktypes.IPAM
if spec.IPAM != nil {
if spec.IPAM.Driver != nil {
ipam.Driver = spec.IPAM.Driver.Name
ipam.Options = spec.IPAM.Driver.Options
}
ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs))
for _, ic := range spec.IPAM.Configs {
ipamConfig := networktypes.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
AuxAddress: ic.Reserved,
}
ipam.Config = append(ipam.Config, ipamConfig)
}
}
nr := basictypes.NetworkResource{
ID: n.ID,
Name: n.Spec.Annotations.Name,
Scope: netconst.SwarmScope,
EnableIPv6: spec.Ipv6Enabled,
IPAM: ipam,
Internal: spec.Internal,
Attachable: spec.Attachable,
Ingress: IsIngressNetwork(&n),
Labels: n.Spec.Annotations.Labels,
}
nr.Created, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
if n.Spec.GetNetwork() != "" {
nr.ConfigFrom = networktypes.ConfigReference{
Network: n.Spec.GetNetwork(),
}
}
if n.DriverState != nil {
nr.Driver = n.DriverState.Name
nr.Options = n.DriverState.Options
}
return nr
}
// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
ns := swarmapi.NetworkSpec{
Annotations: swarmapi.Annotations{
Name: create.Name,
Labels: create.Labels,
},
DriverConfig: &swarmapi.Driver{
Name: create.Driver,
Options: create.Options,
},
Ipv6Enabled: create.EnableIPv6,
Internal: create.Internal,
Attachable: create.Attachable,
Ingress: create.Ingress,
}
if create.IPAM != nil {
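// Fall back to the default IPAM driver when none was requested.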
driver := create.IPAM.Driver
if driver == "" {
driver = "default"
}
ns.IPAM = &swarmapi.IPAMOptions{
Driver: &swarmapi.Driver{
Name: driver,
Options: create.IPAM.Options,
},
}
ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
for _, ipamConfig := range create.IPAM.Config {
ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
Subnet: ipamConfig.Subnet,
Range: ipamConfig.IPRange,
Gateway: ipamConfig.Gateway,
})
}
ns.IPAM.Configs = ipamSpec
}
if create.ConfigFrom != nil {
ns.ConfigFrom = &swarmapi.NetworkSpec_Network{
Network: create.ConfigFrom.Network,
}
}
return ns
}
// IsIngressNetwork checks whether the swarm network is an ingress network
func IsIngressNetwork(n *swarmapi.Network) bool {
if n.Spec.Ingress {
return true
}
// Check for a legacy-defined ingress network
_, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"]
return ok && n.Spec.Annotations.Name == "ingress"
}

View file

@ -0,0 +1,34 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"testing"
"time"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
func TestNetworkConvertBasicNetworkFromGRPCCreatedAt(t *testing.T) {
expected, err := time.Parse("Jan 2, 2006 at 3:04pm (MST)", "Jan 10, 2018 at 7:54pm (PST)")
if err != nil {
t.Fatal(err)
}
createdAt, err := gogotypes.TimestampProto(expected)
if err != nil {
t.Fatal(err)
}
nw := swarmapi.Network{
Meta: swarmapi.Meta{
Version: swarmapi.Version{
Index: 1,
},
CreatedAt: createdAt,
},
}
n := BasicNetworkFromGRPC(nw)
if !n.Created.Equal(expected) {
t.Fatalf("expected time %s; received %s", expected, n.Created)
}
}

View file

@ -0,0 +1,94 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"fmt"
"strings"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
// NodeFromGRPC converts a grpc Node to a Node.
func NodeFromGRPC(n swarmapi.Node) types.Node {
node := types.Node{
ID: n.ID,
Spec: types.NodeSpec{
Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())),
Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
},
Status: types.NodeStatus{
State: types.NodeState(strings.ToLower(n.Status.State.String())),
Message: n.Status.Message,
Addr: n.Status.Addr,
},
}
// Meta
node.Version.Index = n.Meta.Version.Index
node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
// Annotations
node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
// Description
if n.Description != nil {
node.Description.Hostname = n.Description.Hostname
if n.Description.Platform != nil {
node.Description.Platform.Architecture = n.Description.Platform.Architecture
node.Description.Platform.OS = n.Description.Platform.OS
}
if n.Description.Resources != nil {
node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
node.Description.Resources.GenericResources = GenericResourcesFromGRPC(n.Description.Resources.Generic)
}
if n.Description.Engine != nil {
node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
node.Description.Engine.Labels = n.Description.Engine.Labels
for _, plugin := range n.Description.Engine.Plugins {
node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
}
}
if n.Description.TLSInfo != nil {
node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot)
node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey
node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject
}
}
// Manager
if n.ManagerStatus != nil {
node.ManagerStatus = &types.ManagerStatus{
Leader: n.ManagerStatus.Leader,
Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())),
Addr: n.ManagerStatus.Addr,
}
}
return node
}
// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec.
func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
spec := swarmapi.NodeSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
}
if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok {
spec.DesiredRole = swarmapi.NodeRole(role)
} else {
return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
}
if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
spec.Availability = swarmapi.NodeSpec_Availability(availability)
} else {
return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability)
}
return spec, nil
}

View file

@ -0,0 +1,80 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
swarmtypes "github.com/docker/docker/api/types/swarm"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
// SecretFromGRPC converts a grpc Secret to a Secret.
func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
secret := swarmtypes.Secret{
ID: s.ID,
Spec: swarmtypes.SecretSpec{
Annotations: annotationsFromGRPC(s.Spec.Annotations),
Data: s.Spec.Data,
Driver: driverFromGRPC(s.Spec.Driver),
},
}
secret.Version.Index = s.Meta.Version.Index
// Meta
secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
if s.Spec.Templating != nil {
secret.Spec.Templating = &types.Driver{
Name: s.Spec.Templating.Name,
Options: s.Spec.Templating.Options,
}
}
return secret
}
// SecretSpecToGRPC converts Secret to a grpc Secret.
func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec {
spec := swarmapi.SecretSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
Data: s.Data,
Driver: driverToGRPC(s.Driver),
}
if s.Templating != nil {
spec.Templating = &swarmapi.Driver{
Name: s.Templating.Name,
Options: s.Templating.Options,
}
}
return spec
}
// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference
func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference {
refs := []*swarmtypes.SecretReference{}
for _, r := range s {
ref := &swarmtypes.SecretReference{
SecretID: r.SecretID,
SecretName: r.SecretName,
}
if t, ok := r.Target.(*swarmapi.SecretReference_File); ok {
ref.File = &swarmtypes.SecretReferenceFileTarget{
Name: t.File.Name,
UID: t.File.UID,
GID: t.File.GID,
Mode: t.File.Mode,
}
}
refs = append(refs, ref)
}
return refs
}

View file

@ -0,0 +1,613 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"fmt"
"strings"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/pkg/namesgenerator"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/gogo/protobuf/proto"
gogotypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
)
var (
// ErrUnsupportedRuntime is returned when a service uses a runtime that is not supported by the daemon
ErrUnsupportedRuntime = errors.New("unsupported runtime")
)
// ServiceFromGRPC converts a grpc Service to a Service.
func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) {
curSpec, err := serviceSpecFromGRPC(&s.Spec)
if err != nil {
return types.Service{}, err
}
prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec)
if err != nil {
return types.Service{}, err
}
service := types.Service{
ID: s.ID,
Spec: *curSpec,
PreviousSpec: prevSpec,
Endpoint: endpointFromGRPC(s.Endpoint),
}
// Meta
service.Version.Index = s.Meta.Version.Index
service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
// UpdateStatus
if s.UpdateStatus != nil {
service.UpdateStatus = &types.UpdateStatus{}
switch s.UpdateStatus.State {
case swarmapi.UpdateStatus_UPDATING:
service.UpdateStatus.State = types.UpdateStateUpdating
case swarmapi.UpdateStatus_PAUSED:
service.UpdateStatus.State = types.UpdateStatePaused
case swarmapi.UpdateStatus_COMPLETED:
service.UpdateStatus.State = types.UpdateStateCompleted
case swarmapi.UpdateStatus_ROLLBACK_STARTED:
service.UpdateStatus.State = types.UpdateStateRollbackStarted
case swarmapi.UpdateStatus_ROLLBACK_PAUSED:
service.UpdateStatus.State = types.UpdateStateRollbackPaused
case swarmapi.UpdateStatus_ROLLBACK_COMPLETED:
service.UpdateStatus.State = types.UpdateStateRollbackCompleted
}
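// Treat a zero time or the Unix epoch as "unset" and leave the corresponding field nil.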
startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt)
if !startedAt.IsZero() && startedAt.Unix() != 0 {
service.UpdateStatus.StartedAt = &startedAt
}
completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt)
if !completedAt.IsZero() && completedAt.Unix() != 0 {
service.UpdateStatus.CompletedAt = &completedAt
}
service.UpdateStatus.Message = s.UpdateStatus.Message
}
return service, nil
}
func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) {
if spec == nil {
return nil, nil
}
serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
for _, n := range spec.Networks {
netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts}
serviceNetworks = append(serviceNetworks, netConfig)
}
taskTemplate, err := taskSpecFromGRPC(spec.Task)
if err != nil {
return nil, err
}
switch t := spec.Task.GetRuntime().(type) {
case *swarmapi.TaskSpec_Container:
containerConfig := t.Container
taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig)
taskTemplate.Runtime = types.RuntimeContainer
case *swarmapi.TaskSpec_Generic:
switch t.Generic.Kind {
case string(types.RuntimePlugin):
taskTemplate.Runtime = types.RuntimePlugin
default:
return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl)
}
default:
return nil, fmt.Errorf("error creating service; unsupported runtime %T", t)
}
convertedSpec := &types.ServiceSpec{
Annotations: annotationsFromGRPC(spec.Annotations),
TaskTemplate: taskTemplate,
Networks: serviceNetworks,
EndpointSpec: endpointSpecFromGRPC(spec.Endpoint),
}
// UpdateConfig
convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update)
convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback)
// Mode
switch t := spec.GetMode().(type) {
case *swarmapi.ServiceSpec_Global:
convertedSpec.Mode.Global = &types.GlobalService{}
case *swarmapi.ServiceSpec_Replicated:
convertedSpec.Mode.Replicated = &types.ReplicatedService{
Replicas: &t.Replicated.Replicas,
}
}
return convertedSpec, nil
}
// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
name := s.Name
if name == "" {
name = namesgenerator.GetRandomName(0)
}
serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks))
for _, n := range s.Networks {
netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts}
serviceNetworks = append(serviceNetworks, netConfig)
}
taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks))
for _, n := range s.TaskTemplate.Networks {
netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts}
taskNetworks = append(taskNetworks, netConfig)
}
spec := swarmapi.ServiceSpec{
Annotations: swarmapi.Annotations{
Name: name,
Labels: s.Labels,
},
Task: swarmapi.TaskSpec{
Resources: resourcesToGRPC(s.TaskTemplate.Resources),
LogDriver: driverToGRPC(s.TaskTemplate.LogDriver),
Networks: taskNetworks,
ForceUpdate: s.TaskTemplate.ForceUpdate,
},
Networks: serviceNetworks,
}
switch s.TaskTemplate.Runtime {
case types.RuntimeContainer, "": // if empty runtime default to container
if s.TaskTemplate.ContainerSpec != nil {
containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
}
case types.RuntimePlugin:
if s.Mode.Replicated != nil {
return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode")
}
s.Mode.Global = &types.GlobalService{} // must always be global
if s.TaskTemplate.PluginSpec != nil {
pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Runtime = &swarmapi.TaskSpec_Generic{
Generic: &swarmapi.GenericRuntimeSpec{
Kind: string(types.RuntimePlugin),
Payload: &gogotypes.Any{
TypeUrl: string(types.RuntimeURLPlugin),
Value: pluginSpec,
},
},
}
}
default:
return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime
}
restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Restart = restartPolicy
if s.TaskTemplate.Placement != nil {
var preferences []*swarmapi.PlacementPreference
for _, pref := range s.TaskTemplate.Placement.Preferences {
if pref.Spread != nil {
preferences = append(preferences, &swarmapi.PlacementPreference{
Preference: &swarmapi.PlacementPreference_Spread{
Spread: &swarmapi.SpreadOver{
SpreadDescriptor: pref.Spread.SpreadDescriptor,
},
},
})
}
}
var platforms []*swarmapi.Platform
for _, plat := range s.TaskTemplate.Placement.Platforms {
platforms = append(platforms, &swarmapi.Platform{
Architecture: plat.Architecture,
OS: plat.OS,
})
}
spec.Task.Placement = &swarmapi.Placement{
Constraints: s.TaskTemplate.Placement.Constraints,
Preferences: preferences,
Platforms: platforms,
}
}
spec.Update, err = updateConfigToGRPC(s.UpdateConfig)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
if s.EndpointSpec != nil {
if s.EndpointSpec.Mode != "" &&
s.EndpointSpec.Mode != types.ResolutionModeVIP &&
s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
}
spec.Endpoint = &swarmapi.EndpointSpec{}
spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])
for _, portConfig := range s.EndpointSpec.Ports {
spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
Name: portConfig.Name,
Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
}
// Mode
if s.Mode.Global != nil && s.Mode.Replicated != nil {
return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode")
}
if s.Mode.Global != nil {
spec.Mode = &swarmapi.ServiceSpec_Global{
Global: &swarmapi.GlobalService{},
}
} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
spec.Mode = &swarmapi.ServiceSpec_Replicated{
Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
}
} else {
spec.Mode = &swarmapi.ServiceSpec_Replicated{
Replicated: &swarmapi.ReplicatedService{Replicas: 1},
}
}
return spec, nil
}
func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations {
a := types.Annotations{
Name: ann.Name,
Labels: ann.Labels,
}
if a.Labels == nil {
a.Labels = make(map[string]string)
}
return a
}
// GenericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource
func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.GenericResource {
var generic []types.GenericResource
for _, res := range genericRes {
var current types.GenericResource
switch r := res.Resource.(type) {
case *swarmapi.GenericResource_DiscreteResourceSpec:
current.DiscreteResourceSpec = &types.DiscreteGenericResource{
Kind: r.DiscreteResourceSpec.Kind,
Value: r.DiscreteResourceSpec.Value,
}
case *swarmapi.GenericResource_NamedResourceSpec:
current.NamedResourceSpec = &types.NamedGenericResource{
Kind: r.NamedResourceSpec.Kind,
Value: r.NamedResourceSpec.Value,
}
}
generic = append(generic, current)
}
return generic
}
func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
var resources *types.ResourceRequirements
if res != nil {
resources = &types.ResourceRequirements{}
if res.Limits != nil {
resources.Limits = &types.Resources{
NanoCPUs: res.Limits.NanoCPUs,
MemoryBytes: res.Limits.MemoryBytes,
}
}
if res.Reservations != nil {
resources.Reservations = &types.Resources{
NanoCPUs: res.Reservations.NanoCPUs,
MemoryBytes: res.Reservations.MemoryBytes,
GenericResources: GenericResourcesFromGRPC(res.Reservations.Generic),
}
}
}
return resources
}
// GenericResourcesToGRPC converts a GenericResource to a GRPC GenericResource
func GenericResourcesToGRPC(genericRes []types.GenericResource) []*swarmapi.GenericResource {
var generic []*swarmapi.GenericResource
for _, res := range genericRes {
var r *swarmapi.GenericResource
if res.DiscreteResourceSpec != nil {
r = genericresource.NewDiscrete(res.DiscreteResourceSpec.Kind, res.DiscreteResourceSpec.Value)
} else if res.NamedResourceSpec != nil {
r = genericresource.NewString(res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value)
}
generic = append(generic, r)
}
return generic
}
func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements {
var reqs *swarmapi.ResourceRequirements
if res != nil {
reqs = &swarmapi.ResourceRequirements{}
if res.Limits != nil {
reqs.Limits = &swarmapi.Resources{
NanoCPUs: res.Limits.NanoCPUs,
MemoryBytes: res.Limits.MemoryBytes,
}
}
if res.Reservations != nil {
reqs.Reservations = &swarmapi.Resources{
NanoCPUs: res.Reservations.NanoCPUs,
MemoryBytes: res.Reservations.MemoryBytes,
Generic: GenericResourcesToGRPC(res.Reservations.GenericResources),
}
}
}
return reqs
}
func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
var rp *types.RestartPolicy
if p != nil {
rp = &types.RestartPolicy{}
switch p.Condition {
case swarmapi.RestartOnNone:
rp.Condition = types.RestartPolicyConditionNone
case swarmapi.RestartOnFailure:
rp.Condition = types.RestartPolicyConditionOnFailure
case swarmapi.RestartOnAny:
rp.Condition = types.RestartPolicyConditionAny
default:
rp.Condition = types.RestartPolicyConditionAny
}
if p.Delay != nil {
delay, _ := gogotypes.DurationFromProto(p.Delay)
rp.Delay = &delay
}
if p.Window != nil {
window, _ := gogotypes.DurationFromProto(p.Window)
rp.Window = &window
}
rp.MaxAttempts = &p.MaxAttempts
}
return rp
}
func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
var rp *swarmapi.RestartPolicy
if p != nil {
rp = &swarmapi.RestartPolicy{}
switch p.Condition {
case types.RestartPolicyConditionNone:
rp.Condition = swarmapi.RestartOnNone
case types.RestartPolicyConditionOnFailure:
rp.Condition = swarmapi.RestartOnFailure
case types.RestartPolicyConditionAny:
rp.Condition = swarmapi.RestartOnAny
default:
if string(p.Condition) != "" {
return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
}
rp.Condition = swarmapi.RestartOnAny
}
if p.Delay != nil {
rp.Delay = gogotypes.DurationProto(*p.Delay)
}
if p.Window != nil {
rp.Window = gogotypes.DurationProto(*p.Window)
}
if p.MaxAttempts != nil {
rp.MaxAttempts = *p.MaxAttempts
}
}
return rp, nil
}
func placementFromGRPC(p *swarmapi.Placement) *types.Placement {
if p == nil {
return nil
}
r := &types.Placement{
Constraints: p.Constraints,
}
for _, pref := range p.Preferences {
if spread := pref.GetSpread(); spread != nil {
r.Preferences = append(r.Preferences, types.PlacementPreference{
Spread: &types.SpreadOver{
SpreadDescriptor: spread.SpreadDescriptor,
},
})
}
}
for _, plat := range p.Platforms {
r.Platforms = append(r.Platforms, types.Platform{
Architecture: plat.Architecture,
OS: plat.OS,
})
}
return r
}
func driverFromGRPC(p *swarmapi.Driver) *types.Driver {
if p == nil {
return nil
}
return &types.Driver{
Name: p.Name,
Options: p.Options,
}
}
func driverToGRPC(p *types.Driver) *swarmapi.Driver {
if p == nil {
return nil
}
return &swarmapi.Driver{
Name: p.Name,
Options: p.Options,
}
}
func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig {
if updateConfig == nil {
return nil
}
converted := &types.UpdateConfig{
Parallelism: updateConfig.Parallelism,
MaxFailureRatio: updateConfig.MaxFailureRatio,
}
converted.Delay = updateConfig.Delay
if updateConfig.Monitor != nil {
converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor)
}
switch updateConfig.FailureAction {
case swarmapi.UpdateConfig_PAUSE:
converted.FailureAction = types.UpdateFailureActionPause
case swarmapi.UpdateConfig_CONTINUE:
converted.FailureAction = types.UpdateFailureActionContinue
case swarmapi.UpdateConfig_ROLLBACK:
converted.FailureAction = types.UpdateFailureActionRollback
}
switch updateConfig.Order {
case swarmapi.UpdateConfig_STOP_FIRST:
converted.Order = types.UpdateOrderStopFirst
case swarmapi.UpdateConfig_START_FIRST:
converted.Order = types.UpdateOrderStartFirst
}
return converted
}
func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) {
if updateConfig == nil {
return nil, nil
}
converted := &swarmapi.UpdateConfig{
Parallelism: updateConfig.Parallelism,
Delay: updateConfig.Delay,
MaxFailureRatio: updateConfig.MaxFailureRatio,
}
switch updateConfig.FailureAction {
case types.UpdateFailureActionPause, "":
converted.FailureAction = swarmapi.UpdateConfig_PAUSE
case types.UpdateFailureActionContinue:
converted.FailureAction = swarmapi.UpdateConfig_CONTINUE
case types.UpdateFailureActionRollback:
converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK
default:
return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction)
}
if updateConfig.Monitor != 0 {
converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor)
}
switch updateConfig.Order {
case types.UpdateOrderStopFirst, "":
converted.Order = swarmapi.UpdateConfig_STOP_FIRST
case types.UpdateOrderStartFirst:
converted.Order = swarmapi.UpdateConfig_START_FIRST
default:
return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order)
}
return converted, nil
}
func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) {
taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks))
for _, n := range taskSpec.Networks {
netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts}
taskNetworks = append(taskNetworks, netConfig)
}
t := types.TaskSpec{
Resources: resourcesFromGRPC(taskSpec.Resources),
RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart),
Placement: placementFromGRPC(taskSpec.Placement),
LogDriver: driverFromGRPC(taskSpec.LogDriver),
Networks: taskNetworks,
ForceUpdate: taskSpec.ForceUpdate,
}
switch taskSpec.GetRuntime().(type) {
case *swarmapi.TaskSpec_Container, nil:
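// A task with no explicit runtime is treated as a container task.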
c := taskSpec.GetContainer()
if c != nil {
t.ContainerSpec = containerSpecFromGRPC(c)
}
case *swarmapi.TaskSpec_Generic:
g := taskSpec.GetGeneric()
if g != nil {
switch g.Kind {
case string(types.RuntimePlugin):
var p runtime.PluginSpec
if err := proto.Unmarshal(g.Payload.Value, &p); err != nil {
return t, errors.Wrap(err, "error unmarshalling plugin spec")
}
t.PluginSpec = &p
}
}
}
return t, nil
}

View file

@ -0,0 +1,234 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"testing"
containertypes "github.com/docker/docker/api/types/container"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/swarm/runtime"
swarmapi "github.com/docker/swarmkit/api"
google_protobuf3 "github.com/gogo/protobuf/types"
"github.com/gotestyourself/gotestyourself/assert"
)
func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) {
gs := swarmapi.Service{
Meta: swarmapi.Meta{
Version: swarmapi.Version{
Index: 1,
},
CreatedAt: nil,
UpdatedAt: nil,
},
SpecVersion: &swarmapi.Version{
Index: 1,
},
Spec: swarmapi.ServiceSpec{
Task: swarmapi.TaskSpec{
Runtime: &swarmapi.TaskSpec_Container{
Container: &swarmapi.ContainerSpec{
Image: "alpine:latest",
},
},
},
},
}
svc, err := ServiceFromGRPC(gs)
if err != nil {
t.Fatal(err)
}
if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer {
t.Fatalf("expected type %s; received %T", swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime)
}
}
func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) {
kind := string(swarmtypes.RuntimePlugin)
url := swarmtypes.RuntimeURLPlugin
gs := swarmapi.Service{
Meta: swarmapi.Meta{
Version: swarmapi.Version{
Index: 1,
},
CreatedAt: nil,
UpdatedAt: nil,
},
SpecVersion: &swarmapi.Version{
Index: 1,
},
Spec: swarmapi.ServiceSpec{
Task: swarmapi.TaskSpec{
Runtime: &swarmapi.TaskSpec_Generic{
Generic: &swarmapi.GenericRuntimeSpec{
Kind: kind,
Payload: &google_protobuf3.Any{
TypeUrl: string(url),
},
},
},
},
},
}
svc, err := ServiceFromGRPC(gs)
if err != nil {
t.Fatal(err)
}
if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin {
t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime)
}
}
func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) {
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
Runtime: swarmtypes.RuntimePlugin,
PluginSpec: &runtime.PluginSpec{},
},
Mode: swarmtypes.ServiceMode{
Global: &swarmtypes.GlobalService{},
},
}
svc, err := ServiceSpecToGRPC(s)
if err != nil {
t.Fatal(err)
}
v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic)
if !ok {
t.Fatal("expected type swarmapi.TaskSpec_Generic")
}
if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) {
t.Fatalf("expected url %s; received %s", swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl)
}
}
func TestServiceConvertToGRPCContainerRuntime(t *testing.T) {
image := "alpine:latest"
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
ContainerSpec: &swarmtypes.ContainerSpec{
Image: image,
},
},
Mode: swarmtypes.ServiceMode{
Global: &swarmtypes.GlobalService{},
},
}
svc, err := ServiceSpecToGRPC(s)
if err != nil {
t.Fatal(err)
}
v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Container)
if !ok {
t.Fatal("expected type swarmapi.TaskSpec_Container")
}
if v.Container.Image != image {
t.Fatalf("expected image %s; received %s", image, v.Container.Image)
}
}
func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) {
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
Runtime: "customruntime",
},
Mode: swarmtypes.ServiceMode{
Global: &swarmtypes.GlobalService{},
},
}
if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime {
t.Fatal(err)
}
}
func TestServiceConvertToGRPCIsolation(t *testing.T) {
cases := []struct {
name string
from containertypes.Isolation
to swarmapi.ContainerSpec_Isolation
}{
{name: "empty", from: containertypes.IsolationEmpty, to: swarmapi.ContainerIsolationDefault},
{name: "default", from: containertypes.IsolationDefault, to: swarmapi.ContainerIsolationDefault},
{name: "process", from: containertypes.IsolationProcess, to: swarmapi.ContainerIsolationProcess},
{name: "hyperv", from: containertypes.IsolationHyperV, to: swarmapi.ContainerIsolationHyperV},
{name: "proCess", from: containertypes.Isolation("proCess"), to: swarmapi.ContainerIsolationProcess},
{name: "hypErv", from: containertypes.Isolation("hypErv"), to: swarmapi.ContainerIsolationHyperV},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
ContainerSpec: &swarmtypes.ContainerSpec{
Image: "alpine:latest",
Isolation: c.from,
},
},
Mode: swarmtypes.ServiceMode{
Global: &swarmtypes.GlobalService{},
},
}
res, err := ServiceSpecToGRPC(s)
assert.NilError(t, err)
v, ok := res.Task.Runtime.(*swarmapi.TaskSpec_Container)
if !ok {
t.Fatal("expected type swarmapi.TaskSpec_Container")
}
assert.Equal(t, c.to, v.Container.Isolation)
})
}
}
func TestServiceConvertFromGRPCIsolation(t *testing.T) {
cases := []struct {
name string
from swarmapi.ContainerSpec_Isolation
to containertypes.Isolation
}{
{name: "default", to: containertypes.IsolationDefault, from: swarmapi.ContainerIsolationDefault},
{name: "process", to: containertypes.IsolationProcess, from: swarmapi.ContainerIsolationProcess},
{name: "hyperv", to: containertypes.IsolationHyperV, from: swarmapi.ContainerIsolationHyperV},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
gs := swarmapi.Service{
Meta: swarmapi.Meta{
Version: swarmapi.Version{
Index: 1,
},
CreatedAt: nil,
UpdatedAt: nil,
},
SpecVersion: &swarmapi.Version{
Index: 1,
},
Spec: swarmapi.ServiceSpec{
Task: swarmapi.TaskSpec{
Runtime: &swarmapi.TaskSpec_Container{
Container: &swarmapi.ContainerSpec{
Image: "alpine:latest",
Isolation: c.from,
},
},
},
},
}
svc, err := ServiceFromGRPC(gs)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, c.to, svc.Spec.TaskTemplate.ContainerSpec.Isolation)
})
}
}

View file

@ -0,0 +1,147 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"fmt"
"strings"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
gogotypes "github.com/gogo/protobuf/types"
)
// SwarmFromGRPC converts a grpc Cluster to a Swarm.
func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
swarm := types.Swarm{
ClusterInfo: types.ClusterInfo{
ID: c.ID,
Spec: types.Spec{
Orchestration: types.OrchestrationConfig{
TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit,
},
Raft: types.RaftConfig{
SnapshotInterval: c.Spec.Raft.SnapshotInterval,
KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots,
LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
HeartbeatTick: int(c.Spec.Raft.HeartbeatTick),
ElectionTick: int(c.Spec.Raft.ElectionTick),
},
EncryptionConfig: types.EncryptionConfig{
AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers,
},
CAConfig: types.CAConfig{
// do not include the signing CA cert or key (it should already be redacted via the swarm APIs) -
// the key because it's secret, and the cert because otherwise doing a get + update on the spec
// can cause issues because the key would be missing and the cert wouldn't
ForceRotate: c.Spec.CAConfig.ForceRotate,
},
},
TLSInfo: types.TLSInfo{
TrustRoot: string(c.RootCA.CACert),
},
RootRotationInProgress: c.RootCA.RootRotation != nil,
},
JoinTokens: types.JoinTokens{
Worker: c.RootCA.JoinTokens.Worker,
Manager: c.RootCA.JoinTokens.Manager,
},
}
issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA)
if err == nil && issuerInfo != nil {
swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject
swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey
}
heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod)
swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod
swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry)
for _, ca := range c.Spec.CAConfig.ExternalCAs {
swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())),
URL: ca.URL,
Options: ca.Options,
CACert: string(ca.CACert),
})
}
// Meta
swarm.Version.Index = c.Meta.Version.Index
swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt)
swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt)
// Annotations
swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations)
return swarm
}
// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{})
}
// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec
func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
// We take the initSpec (either created from scratch, or returned by swarmkit),
// and will only change the value if the one taken from types.Spec is not nil or 0.
// In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo.
if s.Annotations.Name != "" {
spec.Annotations.Name = s.Annotations.Name
}
if len(s.Annotations.Labels) != 0 {
spec.Annotations.Labels = s.Annotations.Labels
}
if s.Orchestration.TaskHistoryRetentionLimit != nil {
spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit
}
if s.Raft.SnapshotInterval != 0 {
spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval
}
if s.Raft.KeepOldSnapshots != nil {
spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots
}
if s.Raft.LogEntriesForSlowFollowers != 0 {
spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers
}
if s.Raft.HeartbeatTick != 0 {
spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick)
}
if s.Raft.ElectionTick != 0 {
spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick)
}
if s.Dispatcher.HeartbeatPeriod != 0 {
spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(s.Dispatcher.HeartbeatPeriod)
}
if s.CAConfig.NodeCertExpiry != 0 {
spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry)
}
if s.CAConfig.SigningCACert != "" {
spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert)
}
if s.CAConfig.SigningCAKey != "" {
// do propagate the signing CA key here because we want to provide it TO the swarm APIs
spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey)
}
spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate
for _, ca := range s.CAConfig.ExternalCAs {
protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))]
if !ok {
return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol)
}
spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{
Protocol: swarmapi.ExternalCA_CAProtocol(protocol),
URL: ca.URL,
Options: ca.Options,
CACert: []byte(ca.CACert),
})
}
spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers
return spec, nil
}

View file

@ -0,0 +1,72 @@
package convert // import "github.com/docker/docker/daemon/cluster/convert"
import (
"strings"
types "github.com/docker/docker/api/types/swarm"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
// TaskFromGRPC converts a grpc Task to a Task.
func TaskFromGRPC(t swarmapi.Task) (types.Task, error) {
if t.Spec.GetAttachment() != nil {
return types.Task{}, nil
}
containerStatus := t.Status.GetContainer()
taskSpec, err := taskSpecFromGRPC(t.Spec)
if err != nil {
return types.Task{}, err
}
task := types.Task{
ID: t.ID,
Annotations: annotationsFromGRPC(t.Annotations),
ServiceID: t.ServiceID,
Slot: int(t.Slot),
NodeID: t.NodeID,
Spec: taskSpec,
Status: types.TaskStatus{
State: types.TaskState(strings.ToLower(t.Status.State.String())),
Message: t.Status.Message,
Err: t.Status.Err,
},
DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
GenericResources: GenericResourcesFromGRPC(t.AssignedGenericResources),
}
// Meta
task.Version.Index = t.Meta.Version.Index
task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt)
task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt)
task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp)
if containerStatus != nil {
task.Status.ContainerStatus = &types.ContainerStatus{
ContainerID: containerStatus.ContainerID,
PID: int(containerStatus.PID),
ExitCode: int(containerStatus.ExitCode),
}
}
// NetworksAttachments
for _, na := range t.Networks {
task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na))
}
if t.Status.PortStatus == nil {
return task, nil
}
for _, p := range t.Status.PortStatus.Ports {
task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{
Name: p.Name,
Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])),
PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])),
TargetPort: p.TargetPort,
PublishedPort: p.PublishedPort,
})
}
return task, nil
}

View file

@ -0,0 +1,61 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
const (
// errNoSwarm is returned on leaving a cluster that was never initialized
errNoSwarm notAvailableError = "This node is not part of a swarm"
// errSwarmExists is returned on initialize or join request for a cluster that has already been activated
errSwarmExists notAvailableError = "This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one."
// errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
errSwarmJoinTimeoutReached notAvailableError = "Timeout was reached before node joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node."
// errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
errSwarmLocked notAvailableError = "Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it."
// errSwarmCertificatesExpired is returned if docker was not started for the whole validity period of the certificates, so they had no chance to renew automatically.
errSwarmCertificatesExpired notAvailableError = "Swarm certificates have expired. To replace them, leave the swarm and join again."
// errSwarmNotManager is returned if the node is not a swarm manager.
errSwarmNotManager notAvailableError = "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager."
)
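// notAllowedError is a cluster error that maps to a "forbidden" API response.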
type notAllowedError string
func (e notAllowedError) Error() string {
return string(e)
}
func (e notAllowedError) Forbidden() {}
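// notAvailableError is a cluster error that maps to an "unavailable" API response.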
type notAvailableError string
func (e notAvailableError) Error() string {
return string(e)
}
func (e notAvailableError) Unavailable() {}
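// configError is a cluster configuration error that maps to an "invalid parameter" API response.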
type configError string
func (e configError) Error() string {
return string(e)
}
func (e configError) InvalidParameter() {}
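// invalidUnlockKey is returned when the key supplied to unlock the swarm is invalid; it maps to an "unauthorized" API response.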
type invalidUnlockKey struct{}
func (invalidUnlockKey) Error() string {
return "swarm could not be unlocked: invalid key provided"
}
func (invalidUnlockKey) Unauthorized() {}
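// notLockedError is returned when an unlock is requested for a swarm that is not locked; it maps to a "conflict" API response.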
type notLockedError struct{}
func (notLockedError) Error() string {
return "swarm is not locked"
}
func (notLockedError) Conflict() {}

View file

@ -0,0 +1,70 @@
package executor // import "github.com/docker/docker/daemon/cluster/executor"
import (
"io"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
containerpkg "github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networkSettings "github.com/docker/docker/daemon/network"
"github.com/docker/docker/plugin"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
networktypes "github.com/docker/libnetwork/types"
"github.com/docker/swarmkit/agent/exec"
"golang.org/x/net/context"
)
// Backend defines the executor component for a swarm agent.
type Backend interface {
CreateManagedNetwork(clustertypes.NetworkCreateRequest) error
DeleteManagedNetwork(networkID string) error
FindNetwork(idName string) (libnetwork.Network, error)
SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error)
ReleaseIngress() (<-chan struct{}, error)
CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStop(name string, seconds *int) error
ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
ActivateContainerServiceBinding(containerName string) error
DeactivateContainerServiceBinding(containerName string) error
UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error)
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerKill(name string, sig uint64) error
SetContainerDependencyStore(name string, store exec.DependencyGetter) error
SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error
SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error
SystemInfo() (*types.Info, error)
VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
Containers(config *types.ContainerListOptions) ([]*types.Container, error)
SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
DaemonJoinsCluster(provider cluster.Provider)
DaemonLeavesCluster()
IsSwarmCompatible() error
SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{})
UnsubscribeFromEvents(listener chan interface{})
UpdateAttachment(string, string, string, *network.NetworkingConfig) error
WaitForDetachment(context.Context, string, string, string, string) error
PluginManager() *plugin.Manager
PluginGetter() *plugin.Store
GetAttachmentStore() *networkSettings.AttachmentStore
}
// ImageBackend is used by an executor to perform image operations
type ImageBackend interface {
PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
LookupImage(name string) (*types.ImageInspect, error)
}

View file

@ -0,0 +1,471 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"runtime"
"strings"
"syscall"
"time"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/libnetwork"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
gogotypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/time/rate"
)
// containerAdapter conducts remote operations for a container. All calls
// are mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
container *containerConfig
dependencies exec.DependencyGetter
}
func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
ctnr, err := newContainerConfig(task, node)
if err != nil {
return nil, err
}
return &containerAdapter{
container: ctnr,
backend: b,
imageBackend: i,
dependencies: dependencies,
}, nil
}
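// pullImage pulls the image referenced by the container spec, skipping the
// pull when the image is referenced by ID or a matching digest already exists
// locally. Pull progress is logged at debug level and rate limited.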
func (c *containerAdapter) pullImage(ctx context.Context) error {
spec := c.container.spec()
// Skip pulling if the image is referenced by image ID.
if _, err := digest.Parse(spec.Image); err == nil {
return nil
}
// Skip pulling if the image is referenced by digest and already
// exists locally.
named, err := reference.ParseNormalizedNamed(spec.Image)
if err == nil {
if _, ok := named.(reference.Canonical); ok {
_, err := c.imageBackend.LookupImage(spec.Image)
if err == nil {
return nil
}
}
}
// if the image needs to be pulled, the auth config will be retrieved and updated
var encodedAuthConfig string
if spec.PullOptions != nil {
encodedAuthConfig = spec.PullOptions.RegistryAuth
}
authConfig := &types.AuthConfig{}
if encodedAuthConfig != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
pr, pw := io.Pipe()
metaHeaders := map[string][]string{}
go func() {
// TODO @jhowardmsft LCOW Support: This will need revisiting as
// the stack is built up to include LCOW support for swarm.
platform := runtime.GOOS
err := c.imageBackend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
pw.CloseWithError(err)
}()
dec := json.NewDecoder(pr)
dec.UseNumber()
m := map[string]interface{}{}
spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)
lastStatus := ""
for {
if err := dec.Decode(&m); err != nil {
if err == io.EOF {
break
}
return err
}
l := log.G(ctx)
// limit pull progress logs unless the status changes
if spamLimiter.Allow() || lastStatus != m["status"] {
// if we have progress details, we have everything we need
if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
// first, log the image and status
l = l.WithFields(logrus.Fields{
"image": c.container.image(),
"status": m["status"],
})
// then, if we have progress, log the progress
if progress["current"] != nil && progress["total"] != nil {
l = l.WithFields(logrus.Fields{
"current": progress["current"],
"total": progress["total"],
})
}
}
l.Debug("pull in progress")
}
// sometimes, we get no useful information at all, and add no fields
if status, ok := m["status"].(string); ok {
lastStatus = status
}
}
// if the final stream object contained an error, return it
if errMsg, ok := m["error"]; ok {
return fmt.Errorf("%v", errMsg)
}
return nil
}
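// createNetworks creates every network the task is attached to, ignoring
// networks that already exist or are predefined on the engine.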
func (c *containerAdapter) createNetworks(ctx context.Context) error {
for name := range c.container.networksAttachments {
ncr, err := c.container.networkCreateRequest(name)
if err != nil {
return err
}
if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
if _, ok := err.(libnetwork.NetworkNameError); ok {
continue
}
// We will continue if CreateManagedNetwork returns a PredefinedNetworkError.
// Other callers can still treat it as an error.
if _, ok := err.(daemon.PredefinedNetworkError); ok {
continue
}
return err
}
}
return nil
}
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
for name, v := range c.container.networksAttachments {
if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
switch err.(type) {
case *libnetwork.ActiveEndpointsError:
continue
case libnetwork.ErrNoSuchNetwork:
continue
default:
log.G(ctx).Errorf("network %s remove failed: %v", name, err)
return err
}
}
}
return nil
}
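// networkAttach updates the network attachment for the unmanaged container
// referenced by this task, using the first endpoint in the networking config.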
func (c *containerAdapter) networkAttach(ctx context.Context) error {
config := c.container.createNetworkingConfig(c.backend)
var (
networkName string
networkID string
)
if config != nil {
for n, epConfig := range config.EndpointsConfig {
networkName = n
networkID = epConfig.NetworkID
break
}
}
return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
config := c.container.createNetworkingConfig(c.backend)
var (
networkName string
networkID string
)
if config != nil {
for n, epConfig := range config.EndpointsConfig {
networkName = n
networkID = epConfig.NetworkID
break
}
}
return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}
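// create creates the managed container, connects it to any additional
// networks, and configures its dependency store, secret, config, and
// service config references.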
func (c *containerAdapter) create(ctx context.Context) error {
var cr containertypes.ContainerCreateCreatedBody
var err error
if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
Name: c.container.name(),
Config: c.container.config(),
HostConfig: c.container.hostConfig(),
// Use the first network in container create
NetworkingConfig: c.container.createNetworkingConfig(c.backend),
}); err != nil {
return err
}
// Docker daemon currently doesn't support multiple networks in container create
// Connect to all other networks
nc := c.container.connectNetworkingConfig(c.backend)
if nc != nil {
for n, ep := range nc.EndpointsConfig {
if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
return err
}
}
}
container := c.container.task.Spec.GetContainer()
if container == nil {
return errors.New("unable to get container from task spec")
}
if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
return err
}
// configure secrets
secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
return err
}
configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
return err
}
return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
}
// checkMounts ensures that the provided mounts won't have any host-specific
// problems at startup. For example, we disallow bind mounts without an
// existing path, which is slightly different from the container API.
func (c *containerAdapter) checkMounts() error {
spec := c.container.spec()
for _, mount := range spec.Mounts {
switch mount.Type {
case api.MountTypeBind:
if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
}
}
}
return nil
}
func (c *containerAdapter) start(ctx context.Context) error {
if err := c.checkMounts(); err != nil {
return err
}
return c.backend.ContainerStart(c.container.name(), nil, "", "")
}
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
if ctx.Err() != nil {
return types.ContainerJSON{}, ctx.Err()
}
if err != nil {
return types.ContainerJSON{}, err
}
return *cs, nil
}
// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
log.G(ctx).Debugf("waiting on events")
buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
eventsq := make(chan events.Message, len(buffer))
for _, event := range buffer {
eventsq <- event
}
go func() {
defer c.backend.UnsubscribeFromEvents(l)
for {
select {
case ev := <-l:
jev, ok := ev.(events.Message)
if !ok {
log.G(ctx).Warnf("unexpected event message: %q", ev)
continue
}
select {
case eventsq <- jev:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}()
return eventsq
}
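// wait returns a channel that delivers the container's state once it is no
// longer running.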
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}
func (c *containerAdapter) shutdown(ctx context.Context) error {
// Default stop grace period to nil (daemon will use the stopTimeout of the container)
var stopgrace *int
spec := c.container.spec()
if spec.StopGracePeriod != nil {
stopgraceValue := int(spec.StopGracePeriod.Seconds)
stopgrace = &stopgraceValue
}
return c.backend.ContainerStop(c.container.name(), stopgrace)
}
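// terminate kills the container by sending it SIGKILL.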
func (c *containerAdapter) terminate(ctx context.Context) error {
return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}
func (c *containerAdapter) remove(ctx context.Context) error {
return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
RemoveVolume: true,
ForceRemove: true,
})
}
func (c *containerAdapter) createVolumes(ctx context.Context) error {
// Create plugin volumes that are embedded inside a Mount
for _, mount := range c.container.task.Spec.GetContainer().Mounts {
if mount.Type != api.MountTypeVolume {
continue
}
if mount.VolumeOptions == nil {
continue
}
if mount.VolumeOptions.DriverConfig == nil {
continue
}
req := c.container.volumeCreateRequest(&mount)
// Check if this volume exists on the engine
if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
// TODO(amitshukla): Today, volume create through the engine api does not return an error
// when the named volume with the same parameters already exists.
// It returns an error if the driver name is different - that is a valid error
return err
}
}
return nil
}
func (c *containerAdapter) activateServiceBinding() error {
return c.backend.ActivateContainerServiceBinding(c.container.name())
}
func (c *containerAdapter) deactivateServiceBinding() error {
return c.backend.DeactivateContainerServiceBinding(c.container.name())
}
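// logs opens a log stream for the container, translating the swarmkit log
// subscription options into engine log options.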
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
apiOptions := &types.ContainerLogsOptions{
Follow: options.Follow,
// Always say yes to Timestamps and Details. we make the decision
// of whether to return these to the user or not way higher up the
// stack.
Timestamps: true,
Details: true,
}
if options.Since != nil {
since, err := gogotypes.TimestampFromProto(options.Since)
if err != nil {
return nil, err
}
// print since as this formatted string because the docker container
// logs interface expects it like this.
// see github.com/docker/docker/api/types/time.ParseTimestamps
apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
}
if options.Tail < 0 {
// See protobuf documentation for details of how this works.
apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
} else if options.Tail > 0 {
return nil, errors.New("tail relative to start of logs not supported via docker API")
}
if len(options.Streams) == 0 {
// empty == all
apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
} else {
for _, stream := range options.Streams {
switch stream {
case api.LogStreamStdout:
apiOptions.ShowStdout = true
case api.LogStreamStderr:
apiOptions.ShowStderr = true
}
}
}
msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
if err != nil {
return nil, err
}
return msgs, nil
}
// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
return strings.Contains(err.Error(), "Conflict. The name")
}
func isUnknownContainer(err error) bool {
return strings.Contains(err.Error(), "No such container:")
}
func isStoppedContainer(err error) bool {
return strings.Contains(err.Error(), "is already stopped")
}

View file

@ -0,0 +1,73 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
// networkAttacherController implements agent.Controller against docker's API.
//
// networkAttacherController manages the lifecycle of the network
// attachment of an unmanaged docker container that is handled as a task
// from the agent's point of view. It provides network attachment
// information to the unmanaged container so it can attach to the network
// and run.
type networkAttacherController struct {
backend executorpkg.Backend
task *api.Task
adapter *containerAdapter
closed chan struct{}
}
func newNetworkAttacherController(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
adapter, err := newContainerAdapter(b, i, task, node, dependencies)
if err != nil {
return nil, err
}
return &networkAttacherController{
backend: b,
task: task,
adapter: adapter,
closed: make(chan struct{}),
}, nil
}
func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error {
return nil
}
func (nc *networkAttacherController) Prepare(ctx context.Context) error {
// Make sure all the networks that the task needs are created.
return nc.adapter.createNetworks(ctx)
}
func (nc *networkAttacherController) Start(ctx context.Context) error {
return nc.adapter.networkAttach(ctx)
}
func (nc *networkAttacherController) Wait(pctx context.Context) error {
ctx, cancel := context.WithCancel(pctx)
defer cancel()
return nc.adapter.waitForDetach(ctx)
}
func (nc *networkAttacherController) Shutdown(ctx context.Context) error {
return nil
}
func (nc *networkAttacherController) Terminate(ctx context.Context) error {
return nil
}
func (nc *networkAttacherController) Remove(ctx context.Context) error {
// Try removing the network referenced in this task in case this
// task is the last one referencing it
return nc.adapter.removeNetworks(ctx)
}
func (nc *networkAttacherController) Close() error {
return nil
}

View file

@ -0,0 +1,671 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
enginecontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
enginemount "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
volumetypes "github.com/docker/docker/api/types/volume"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/go-connections/nat"
netconst "github.com/docker/libnetwork/datastore"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/docker/swarmkit/template"
gogotypes "github.com/gogo/protobuf/types"
)
const (
// Explicitly use the kernel's default setting for CPU quota of 100ms.
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
cpuQuotaPeriod = 100 * time.Millisecond
// systemLabelPrefix represents the reserved namespace for system labels.
systemLabelPrefix = "com.docker.swarm"
)
// containerConfig converts task properties into docker container compatible
// components.
type containerConfig struct {
task *api.Task
networksAttachments map[string]*api.NetworkAttachment
}
// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
var c containerConfig
return &c, c.setTask(t, node)
}
func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
return exec.ErrRuntimeUnsupported
}
container := t.Spec.GetContainer()
if container != nil {
if container.Image == "" {
return ErrImageRequired
}
if err := validateMounts(container.Mounts); err != nil {
return err
}
}
// index the networks by name
c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
for _, attachment := range t.Networks {
c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
}
c.task = t
if t.Spec.GetContainer() != nil {
preparedSpec, err := template.ExpandContainerSpec(node, t)
if err != nil {
return err
}
c.task.Spec.Runtime = &api.TaskSpec_Container{
Container: preparedSpec,
}
}
return nil
}
func (c *containerConfig) networkAttachmentContainerID() string {
attachment := c.task.Spec.GetAttachment()
if attachment == nil {
return ""
}
return attachment.ContainerID
}
func (c *containerConfig) taskID() string {
return c.task.ID
}
func (c *containerConfig) endpoint() *api.Endpoint {
return c.task.Endpoint
}
func (c *containerConfig) spec() *api.ContainerSpec {
return c.task.Spec.GetContainer()
}
func (c *containerConfig) nameOrID() string {
if c.task.Spec.GetContainer() != nil {
return c.name()
}
return c.networkAttachmentContainerID()
}
func (c *containerConfig) name() string {
if c.task.Annotations.Name != "" {
// if set, use the container Annotations.Name field, set in the orchestrator.
return c.task.Annotations.Name
}
slot := fmt.Sprint(c.task.Slot)
if slot == "" || c.task.Slot == 0 {
slot = c.task.NodeID
}
// fallback to service.slot.id.
return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID)
}
func (c *containerConfig) image() string {
raw := c.spec().Image
ref, err := reference.ParseNormalizedNamed(raw)
if err != nil {
return raw
}
return reference.FamiliarString(reference.TagNameOnly(ref))
}
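// portBindings builds the nat.PortMap for endpoint ports published in host
// mode.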
func (c *containerConfig) portBindings() nat.PortMap {
portBindings := nat.PortMap{}
if c.task.Endpoint == nil {
return portBindings
}
for _, portConfig := range c.task.Endpoint.Ports {
if portConfig.PublishMode != api.PublishModeHost {
continue
}
port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
binding := []nat.PortBinding{
{},
}
if portConfig.PublishedPort != 0 {
binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort))
}
portBindings[port] = binding
}
return portBindings
}
func (c *containerConfig) isolation() enginecontainer.Isolation {
return convert.IsolationFromGRPC(c.spec().Isolation)
}
func (c *containerConfig) exposedPorts() map[nat.Port]struct{} {
exposedPorts := make(map[nat.Port]struct{})
if c.task.Endpoint == nil {
return exposedPorts
}
for _, portConfig := range c.task.Endpoint.Ports {
if portConfig.PublishMode != api.PublishModeHost {
continue
}
port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
exposedPorts[port] = struct{}{}
}
return exposedPorts
}
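// config converts the task's container spec into an engine container.Config.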
func (c *containerConfig) config() *enginecontainer.Config {
genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE")
env := append(c.spec().Env, genericEnvs...)
config := &enginecontainer.Config{
Labels: c.labels(),
StopSignal: c.spec().StopSignal,
Tty: c.spec().TTY,
OpenStdin: c.spec().OpenStdin,
User: c.spec().User,
Env: env,
Hostname: c.spec().Hostname,
WorkingDir: c.spec().Dir,
Image: c.image(),
ExposedPorts: c.exposedPorts(),
Healthcheck: c.healthcheck(),
}
if len(c.spec().Command) > 0 {
// If Command is provided, we replace the whole invocation with Command
// by replacing Entrypoint and specifying Cmd. Args is appended to Cmd
// in this case.
config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
config.Cmd = append(config.Cmd, c.spec().Args...)
} else if len(c.spec().Args) > 0 {
// In this case, we assume the image has an Entrypoint and Args
// specifies the arguments for that entrypoint.
config.Cmd = c.spec().Args
}
return config
}
func (c *containerConfig) labels() map[string]string {
var (
system = map[string]string{
"task": "", // mark as cluster task
"task.id": c.task.ID,
"task.name": c.name(),
"node.id": c.task.NodeID,
"service.id": c.task.ServiceID,
"service.name": c.task.ServiceAnnotations.Name,
}
labels = make(map[string]string)
)
// base labels are those defined in the spec.
for k, v := range c.spec().Labels {
labels[k] = v
}
// we then apply the overrides from the task, which may be set via the
// orchestrator.
for k, v := range c.task.Annotations.Labels {
labels[k] = v
}
// finally, we apply the system labels, which override all labels.
for k, v := range system {
labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
}
return labels
}
func (c *containerConfig) mounts() []enginemount.Mount {
var r []enginemount.Mount
for _, mount := range c.spec().Mounts {
r = append(r, convertMount(mount))
}
return r
}
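// convertMount translates a swarmkit api.Mount into the engine mount type,
// including bind propagation, volume, and tmpfs options.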
func convertMount(m api.Mount) enginemount.Mount {
mount := enginemount.Mount{
Source: m.Source,
Target: m.Target,
ReadOnly: m.ReadOnly,
}
switch m.Type {
case api.MountTypeBind:
mount.Type = enginemount.TypeBind
case api.MountTypeVolume:
mount.Type = enginemount.TypeVolume
case api.MountTypeTmpfs:
mount.Type = enginemount.TypeTmpfs
}
if m.BindOptions != nil {
mount.BindOptions = &enginemount.BindOptions{}
switch m.BindOptions.Propagation {
case api.MountPropagationRPrivate:
mount.BindOptions.Propagation = enginemount.PropagationRPrivate
case api.MountPropagationPrivate:
mount.BindOptions.Propagation = enginemount.PropagationPrivate
case api.MountPropagationRSlave:
mount.BindOptions.Propagation = enginemount.PropagationRSlave
case api.MountPropagationSlave:
mount.BindOptions.Propagation = enginemount.PropagationSlave
case api.MountPropagationRShared:
mount.BindOptions.Propagation = enginemount.PropagationRShared
case api.MountPropagationShared:
mount.BindOptions.Propagation = enginemount.PropagationShared
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &enginemount.VolumeOptions{
NoCopy: m.VolumeOptions.NoCopy,
}
if m.VolumeOptions.Labels != nil {
mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
for k, v := range m.VolumeOptions.Labels {
mount.VolumeOptions.Labels[k] = v
}
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &enginemount.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
}
if m.VolumeOptions.DriverConfig.Options != nil {
mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options))
for k, v := range m.VolumeOptions.DriverConfig.Options {
mount.VolumeOptions.DriverConfig.Options[k] = v
}
}
}
}
if m.TmpfsOptions != nil {
mount.TmpfsOptions = &enginemount.TmpfsOptions{
SizeBytes: m.TmpfsOptions.SizeBytes,
Mode: m.TmpfsOptions.Mode,
}
}
return mount
}
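// healthcheck converts the spec's health check, if present, into an engine
// HealthConfig.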
func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
hcSpec := c.spec().Healthcheck
if hcSpec == nil {
return nil
}
interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod)
return &enginecontainer.HealthConfig{
Test: hcSpec.Test,
Interval: interval,
Timeout: timeout,
Retries: int(hcSpec.Retries),
StartPeriod: startPeriod,
}
}
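// hostConfig assembles the engine HostConfig for the task: resources,
// mounts, port bindings, DNS settings, extra hosts, log driver, and
// privileges.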
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
hc := &enginecontainer.HostConfig{
Resources: c.resources(),
GroupAdd: c.spec().Groups,
PortBindings: c.portBindings(),
Mounts: c.mounts(),
ReadonlyRootfs: c.spec().ReadOnly,
Isolation: c.isolation(),
}
if c.spec().DNSConfig != nil {
hc.DNS = c.spec().DNSConfig.Nameservers
hc.DNSSearch = c.spec().DNSConfig.Search
hc.DNSOptions = c.spec().DNSConfig.Options
}
c.applyPrivileges(hc)
// The format of extra hosts on swarmkit is specified in:
// http://man7.org/linux/man-pages/man5/hosts.5.html
// IP_address canonical_hostname [aliases...]
// However, the format of ExtraHosts in HostConfig is
// <host>:<ip>
// We need to do the conversion here
// (Alias is ignored for now)
for _, entry := range c.spec().Hosts {
parts := strings.Fields(entry)
if len(parts) > 1 {
hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0]))
}
}
if c.task.LogDriver != nil {
hc.LogConfig = enginecontainer.LogConfig{
Type: c.task.LogDriver.Name,
Config: c.task.LogDriver.Options,
}
}
if len(c.task.Networks) > 0 {
labels := c.task.Networks[0].Network.Spec.Annotations.Labels
name := c.task.Networks[0].Network.Spec.Annotations.Name
if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" {
hc.NetworkMode = enginecontainer.NetworkMode(name)
}
}
return hc
}
// volumeCreateRequest handles the case of volumes that are defined inside a service Mount
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody {
var (
driverName string
driverOpts map[string]string
labels map[string]string
)
if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
driverName = mount.VolumeOptions.DriverConfig.Name
driverOpts = mount.VolumeOptions.DriverConfig.Options
labels = mount.VolumeOptions.Labels
}
if mount.VolumeOptions != nil {
return &volumetypes.VolumesCreateBody{
Name: mount.Source,
Driver: driverName,
DriverOpts: driverOpts,
Labels: labels,
}
}
return nil
}
func (c *containerConfig) resources() enginecontainer.Resources {
resources := enginecontainer.Resources{}
// If no limits are specified let the engine use its defaults.
//
// TODO(aluzzardi): We might want to set some limits anyway otherwise
// "unlimited" tasks will step over the reservation of other tasks.
r := c.task.Spec.Resources
if r == nil || r.Limits == nil {
return resources
}
if r.Limits.MemoryBytes > 0 {
resources.Memory = r.Limits.MemoryBytes
}
if r.Limits.NanoCPUs > 0 {
// CPU Period must be set in microseconds.
resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
}
return resources
}
// Docker daemon supports just 1 network during container create.
func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
var networks []*api.NetworkAttachment
if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil {
networks = c.task.Networks
}
epConfig := make(map[string]*network.EndpointSettings)
if len(networks) > 0 {
epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b)
}
return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create
func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
var networks []*api.NetworkAttachment
if c.task.Spec.GetContainer() != nil {
networks = c.task.Networks
}
// First network is used during container create. Other networks are used in "docker network connect"
if len(networks) < 2 {
return nil
}
epConfig := make(map[string]*network.EndpointSettings)
for _, na := range networks[1:] {
epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b)
}
return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
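// getEndpointConfig builds the endpoint settings for a network attachment,
// resolving the engine network ID for predefined networks.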
func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings {
var ipv4, ipv6 string
for _, addr := range na.Addresses {
ip, _, err := net.ParseCIDR(addr)
if err != nil {
continue
}
if ip.To4() != nil {
ipv4 = ip.String()
continue
}
if ip.To16() != nil {
ipv6 = ip.String()
}
}
n := &network.EndpointSettings{
NetworkID: na.Network.ID,
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: ipv4,
IPv6Address: ipv6,
},
DriverOpts: na.DriverAttachmentOpts,
}
if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" {
if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil {
n.NetworkID = ln.ID()
}
}
return n
}
func (c *containerConfig) virtualIP(networkID string) string {
if c.task.Endpoint == nil {
return ""
}
for _, eVip := range c.task.Endpoint.VirtualIPs {
// We only support IPv4 VIPs for now.
if eVip.NetworkID == networkID {
vip, _, err := net.ParseCIDR(eVip.Addr)
if err != nil {
return ""
}
return vip.String()
}
}
return ""
}
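// serviceConfig builds the cluster service configuration (aliases, virtual
// addresses, and ingress ports) used for service binding.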
func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
if len(c.task.Networks) == 0 {
return nil
}
logrus.Debugf("Creating service config in agent for t = %+v", c.task)
svcCfg := &clustertypes.ServiceConfig{
Name: c.task.ServiceAnnotations.Name,
Aliases: make(map[string][]string),
ID: c.task.ServiceID,
VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
}
for _, na := range c.task.Networks {
svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
// We support only IPv4 virtual IP for now.
IPv4: c.virtualIP(na.Network.ID),
}
if len(na.Aliases) > 0 {
svcCfg.Aliases[na.Network.ID] = na.Aliases
}
}
if c.task.Endpoint != nil {
for _, ePort := range c.task.Endpoint.Ports {
if ePort.PublishMode != api.PublishModeIngress {
continue
}
svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
Name: ePort.Name,
Protocol: int32(ePort.Protocol),
TargetPort: ePort.TargetPort,
PublishedPort: ePort.PublishedPort,
})
}
}
return svcCfg
}
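// networkCreateRequest builds the request used to create the named network
// attachment on the engine.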
func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
na, ok := c.networksAttachments[name]
if !ok {
return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
}
options := types.NetworkCreate{
// ID: na.Network.ID,
Labels: na.Network.Spec.Annotations.Labels,
Internal: na.Network.Spec.Internal,
Attachable: na.Network.Spec.Attachable,
Ingress: convert.IsIngressNetwork(na.Network),
EnableIPv6: na.Network.Spec.Ipv6Enabled,
CheckDuplicate: true,
Scope: netconst.SwarmScope,
}
if na.Network.Spec.GetNetwork() != "" {
options.ConfigFrom = &network.ConfigReference{
Network: na.Network.Spec.GetNetwork(),
}
}
if na.Network.DriverState != nil {
options.Driver = na.Network.DriverState.Name
options.Options = na.Network.DriverState.Options
}
if na.Network.IPAM != nil {
options.IPAM = &network.IPAM{
Driver: na.Network.IPAM.Driver.Name,
Options: na.Network.IPAM.Driver.Options,
}
for _, ic := range na.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
}
return clustertypes.NetworkCreateRequest{
ID: na.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: name,
NetworkCreate: options,
},
}, nil
}
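// applyPrivileges translates the spec's credential spec and SELinux context
// into security options on the host config.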
func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) {
privileges := c.spec().Privileges
if privileges == nil {
return
}
credentials := privileges.CredentialSpec
if credentials != nil {
switch credentials.Source.(type) {
case *api.Privileges_CredentialSpec_File:
hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile())
case *api.Privileges_CredentialSpec_Registry:
hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry())
}
}
selinux := privileges.SELinuxContext
if selinux != nil {
if selinux.Disable {
hc.SecurityOpt = append(hc.SecurityOpt, "label=disable")
}
if selinux.User != "" {
hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User)
}
if selinux.Role != "" {
hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role)
}
if selinux.Level != "" {
hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level)
}
if selinux.Type != "" {
hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type)
}
}
}
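// eventFilter returns the filter used to subscribe to events for this task's
// container, matching on event type, container name, and task ID label.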
func (c containerConfig) eventFilter() filters.Args {
filter := filters.NewArgs()
filter.Add("type", events.ContainerEventType)
filter.Add("name", c.name())
filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
return filter
}

View file

@ -0,0 +1,37 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"testing"
container "github.com/docker/docker/api/types/container"
swarmapi "github.com/docker/swarmkit/api"
"github.com/gotestyourself/gotestyourself/assert"
)
func TestIsolationConversion(t *testing.T) {
cases := []struct {
name string
from swarmapi.ContainerSpec_Isolation
to container.Isolation
}{
{name: "default", from: swarmapi.ContainerIsolationDefault, to: container.IsolationDefault},
{name: "process", from: swarmapi.ContainerIsolationProcess, to: container.IsolationProcess},
{name: "hyperv", from: swarmapi.ContainerIsolationHyperV, to: container.IsolationHyperV},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task := swarmapi.Task{
Spec: swarmapi.TaskSpec{
Runtime: &swarmapi.TaskSpec_Container{
Container: &swarmapi.ContainerSpec{
Image: "alpine:latest",
Isolation: c.from,
},
},
},
}
config := containerConfig{task: &task}
assert.Equal(t, c.to, config.hostConfig().Isolation)
})
}
}

View file

@ -0,0 +1,692 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/go-connections/nat"
"github.com/docker/libnetwork"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
gogotypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/time/rate"
)
const defaultGossipConvergeDelay = 2 * time.Second
// controller implements agent.Controller against docker's API.
//
// Most operations against docker's API are done through the container name,
// which is unique to the task.
type controller struct {
task *api.Task
adapter *containerAdapter
closed chan struct{}
err error
pulled chan struct{} // closed after pull
cancelPull func() // cancels pull context if not nil
pullErr error // pull error, only read after pulled closed
}
var _ exec.Controller = &controller{}
// newController returns a docker exec runner for the provided task.
func newController(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
adapter, err := newContainerAdapter(b, i, task, node, dependencies)
if err != nil {
return nil, err
}
return &controller{
task: task,
adapter: adapter,
closed: make(chan struct{}),
}, nil
}
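// Task returns the task assigned to this controller.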
func (r *controller) Task() (*api.Task, error) {
return r.task, nil
}
// ContainerStatus returns the container-specific status for the task.
func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
if isUnknownContainer(err) {
return nil, nil
}
return nil, err
}
return parseContainerStatus(ctnr)
}
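// PortStatus returns the host-published port status for the task's container.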
func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) {
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
if isUnknownContainer(err) {
return nil, nil
}
return nil, err
}
return parsePortStatus(ctnr)
}
// Update takes a recent task update and applies it to the container.
func (r *controller) Update(ctx context.Context, t *api.Task) error {
// TODO(stevvooe): While assignment of tasks is idempotent, we do allow
// updates of metadata, such as labelling, as well as any other properties
// that make sense.
return nil
}
// Prepare creates a container and ensures the image is pulled.
//
// If the container has already been created, exec.ErrTaskPrepared is returned.
func (r *controller) Prepare(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
// Make sure all the networks that the task needs are created.
if err := r.adapter.createNetworks(ctx); err != nil {
return err
}
// Make sure all the volumes that the task needs are created.
if err := r.adapter.createVolumes(ctx); err != nil {
return err
}
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
if r.pulled == nil {
// Fork the pull to a different context to allow pull to continue
// on re-entrant calls to Prepare. This ensures that Prepare can be
// idempotent and not incur the extra cost of pulling when
// cancelled on updates.
var pctx context.Context
r.pulled = make(chan struct{})
pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller.
go func() {
defer close(r.pulled)
r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled
}()
}
select {
case <-ctx.Done():
return ctx.Err()
case <-r.pulled:
if r.pullErr != nil {
// NOTE(stevvooe): We always try to pull the image to make sure we have
// the most up to date version. This will return an error, but we only
// log it. If the image truly doesn't exist, the create below will
// error out.
//
// This gives us some nice behavior where we use up to date versions of
// mutable tags, but will still run if the old image is available but a
// registry is down.
//
// If you don't want this behavior, lock down your image to an
// immutable tag or digest.
log.G(ctx).WithError(r.pullErr).Error("pulling image failed")
}
}
}
if err := r.adapter.create(ctx); err != nil {
if isContainerCreateNameConflict(err) {
if _, err := r.adapter.inspect(ctx); err != nil {
return err
}
// container is already created. success!
return exec.ErrTaskPrepared
}
return err
}
return nil
}
// Start the container. An error will be returned if the container is already started.
func (r *controller) Start(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
return err
}
// Detect whether the container has *ever* been started. If so, we don't
// issue the start.
//
// TODO(stevvooe): This is very racy. While reading inspect, another could
// start the process and we could end up starting it twice.
if ctnr.State.Status != "created" {
return exec.ErrTaskStarted
}
for {
if err := r.adapter.start(ctx); err != nil {
if _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork); ok {
// Retry network creation again if we
// failed because some of the networks
// were not found.
if err := r.adapter.createNetworks(ctx); err != nil {
return err
}
continue
}
return errors.Wrap(err, "starting container failed")
}
break
}
// no health check
if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" {
if err := r.adapter.activateServiceBinding(); err != nil {
log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name())
return err
}
return nil
}
// wait for container to be healthy
eventq := r.adapter.events(ctx)
var healthErr error
for {
select {
case event := <-eventq:
if !r.matchevent(event) {
continue
}
switch event.Action {
case "die": // exit on terminal events
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
return errors.Wrap(err, "die event received")
} else if ctnr.State.ExitCode != 0 {
return &exitError{code: ctnr.State.ExitCode, cause: healthErr}
}
return nil
case "destroy":
// If we get here, something has gone wrong but we want to exit
// and report anyways.
return ErrContainerDestroyed
case "health_status: unhealthy":
// in this case, we stop the container and report unhealthy status
if err := r.Shutdown(ctx); err != nil {
return errors.Wrap(err, "unhealthy container shutdown failed")
}
// set health check error, and wait for container to fully exit ("die" event)
healthErr = ErrContainerUnhealthy
case "health_status: healthy":
if err := r.adapter.activateServiceBinding(); err != nil {
log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name())
return err
}
return nil
}
case <-ctx.Done():
return ctx.Err()
case <-r.closed:
return r.err
}
}
}
// Wait on the container to exit.
func (r *controller) Wait(pctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
ctx, cancel := context.WithCancel(pctx)
defer cancel()
healthErr := make(chan error, 1)
go func() {
ectx, cancel := context.WithCancel(ctx) // cancel event context on first event
defer cancel()
if err := r.checkHealth(ectx); err == ErrContainerUnhealthy {
healthErr <- ErrContainerUnhealthy
if err := r.Shutdown(ectx); err != nil {
log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy")
}
}
}()
waitC, err := r.adapter.wait(ctx)
if err != nil {
return err
}
if status := <-waitC; status.ExitCode() != 0 {
exitErr := &exitError{
code: status.ExitCode(),
}
// Set the cause if it is knowable.
select {
case e := <-healthErr:
exitErr.cause = e
default:
if status.Err() != nil {
exitErr.cause = status.Err()
}
}
return exitErr
}
return nil
}
func (r *controller) hasServiceBinding() bool {
if r.task == nil {
return false
}
// service is attached to a network besides the default bridge
for _, na := range r.task.Networks {
if na.Network == nil ||
na.Network.DriverState == nil ||
na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" {
continue
}
return true
}
return false
}
// Shutdown the container cleanly.
func (r *controller) Shutdown(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
if r.cancelPull != nil {
r.cancelPull()
}
if r.hasServiceBinding() {
// remove container from service binding
if err := r.adapter.deactivateServiceBinding(); err != nil {
log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name())
// Don't return an error here, because failure to deactivate
// the service binding is expected if the container was never
// started.
}
// add a delay for gossip converge
// TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay.
time.Sleep(defaultGossipConvergeDelay)
}
if err := r.adapter.shutdown(ctx); err != nil {
if isUnknownContainer(err) || isStoppedContainer(err) {
return nil
}
return err
}
return nil
}
// Terminate the container, with force.
func (r *controller) Terminate(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
if r.cancelPull != nil {
r.cancelPull()
}
if err := r.adapter.terminate(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
return nil
}
// Remove the container and its resources.
func (r *controller) Remove(ctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
if r.cancelPull != nil {
r.cancelPull()
}
// It may be necessary to shut down the task before removing it.
if err := r.Shutdown(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
// This may fail if the task was already shut down.
log.G(ctx).WithError(err).Debug("shutdown failed on removal")
}
// Try removing networks referenced in this task in case this
// task is the last one referencing them
if err := r.adapter.removeNetworks(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
if err := r.adapter.remove(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}
return nil
}
// waitReady waits for a container to be "ready".
// Ready means it's past the started state.
func (r *controller) waitReady(pctx context.Context) error {
if err := r.checkClosed(); err != nil {
return err
}
ctx, cancel := context.WithCancel(pctx)
defer cancel()
eventq := r.adapter.events(ctx)
ctnr, err := r.adapter.inspect(ctx)
if err != nil {
if !isUnknownContainer(err) {
return errors.Wrap(err, "inspect container failed")
}
} else {
switch ctnr.State.Status {
case "running", "exited", "dead":
return nil
}
}
for {
select {
case event := <-eventq:
if !r.matchevent(event) {
continue
}
switch event.Action {
case "start":
return nil
}
case <-ctx.Done():
return ctx.Err()
case <-r.closed:
return r.err
}
}
}
func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
if err := r.checkClosed(); err != nil {
return err
}
// if we're following, wait for this container to be ready. there is a
// problem here: if the container will never be ready (for example, it has
// been totally deleted) then this will wait forever. however, this doesn't
// actually cause any UI issues, and shouldn't be a problem. the stuck wait
// will go away when the follow (context) is canceled.
if options.Follow {
if err := r.waitReady(ctx); err != nil {
return errors.Wrap(err, "container not ready for logs")
}
}
// if we're not following, we're not gonna wait for the container to be
// ready. just call logs. if the container isn't ready, the call will fail
// and return an error. no big deal, we don't care, we only want the logs
// we can get RIGHT NOW with no follow
logsContext, cancel := context.WithCancel(ctx)
msgs, err := r.adapter.logs(logsContext, options)
defer cancel()
if err != nil {
return errors.Wrap(err, "failed getting container logs")
}
var (
// use a rate limiter to keep things under control, but it also provides
// some ability to coalesce messages.
limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s
msgctx = api.LogContext{
NodeID: r.task.NodeID,
ServiceID: r.task.ServiceID,
TaskID: r.task.ID,
}
)
for {
msg, ok := <-msgs
if !ok {
// we're done here, no more messages
return nil
}
if msg.Err != nil {
// the deferred cancel closes the adapter's log stream
return msg.Err
}
// wait here for the limiter to catch up
if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
return errors.Wrap(err, "failed rate limiter")
}
tsp, err := gogotypes.TimestampProto(msg.Timestamp)
if err != nil {
return errors.Wrap(err, "failed to convert timestamp")
}
var stream api.LogStream
if msg.Source == "stdout" {
stream = api.LogStreamStdout
} else if msg.Source == "stderr" {
stream = api.LogStreamStderr
}
// parse the details out of the Attrs map
var attrs []api.LogAttr
if len(msg.Attrs) != 0 {
attrs = make([]api.LogAttr, 0, len(msg.Attrs))
for _, attr := range msg.Attrs {
attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value})
}
}
if err := publisher.Publish(ctx, api.LogMessage{
Context: msgctx,
Timestamp: tsp,
Stream: stream,
Attrs: attrs,
Data: msg.Line,
}); err != nil {
return errors.Wrap(err, "failed to publish log message")
}
}
}
// Close the runner and clean up any ephemeral resources.
func (r *controller) Close() error {
select {
case <-r.closed:
return r.err
default:
if r.cancelPull != nil {
r.cancelPull()
}
r.err = exec.ErrControllerClosed
close(r.closed)
}
return nil
}
func (r *controller) matchevent(event events.Message) bool {
if event.Type != events.ContainerEventType {
return false
}
// we can't filter using the container id since doing so has a high chance of introducing a deadlock. see #33377.
return event.Actor.Attributes["name"] == r.adapter.container.name()
}
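// checkClosed returns the controller's error if it has already been closed,
// or nil otherwise.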
func (r *controller) checkClosed() error {
select {
case <-r.closed:
return r.err
default:
return nil
}
}
func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
status := &api.ContainerStatus{
ContainerID: ctnr.ID,
PID: int32(ctnr.State.Pid),
ExitCode: int32(ctnr.State.ExitCode),
}
return status, nil
}
func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) {
status := &api.PortStatus{}
if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 {
exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports)
if err != nil {
return nil, err
}
status.Ports = exposedPorts
}
return status, nil
}
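// parsePortMap converts the engine's nat.PortMap into swarmkit port configs,
// emitting one entry per host binding.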
func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) {
exposedPorts := make([]*api.PortConfig, 0, len(portMap))
for portProtocol, mapping := range portMap {
parts := strings.SplitN(string(portProtocol), "/", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid port mapping: %s", portProtocol)
}
port, err := strconv.ParseUint(parts[0], 10, 16)
if err != nil {
return nil, err
}
protocol := api.ProtocolTCP
switch strings.ToLower(parts[1]) {
case "tcp":
protocol = api.ProtocolTCP
case "udp":
protocol = api.ProtocolUDP
case "sctp":
protocol = api.ProtocolSCTP
default:
return nil, fmt.Errorf("invalid protocol: %s", parts[1])
}
for _, binding := range mapping {
hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
if err != nil {
return nil, err
}
// TODO(aluzzardi): We're losing the port `name` here since
// there's no way to retrieve it back from the Engine.
exposedPorts = append(exposedPorts, &api.PortConfig{
PublishMode: api.PublishModeHost,
Protocol: protocol,
TargetPort: uint32(port),
PublishedPort: uint32(hostPort),
})
}
}
return exposedPorts, nil
}
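// exitError pairs a non-zero container exit code with its underlying cause, if any.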
type exitError struct {
code int
cause error
}
func (e *exitError) Error() string {
if e.cause != nil {
return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
}
return fmt.Sprintf("task: non-zero exit (%v)", e.code)
}
func (e *exitError) ExitCode() int {
return e.code
}
func (e *exitError) Cause() error {
return e.cause
}
// checkHealth blocks until an unhealthy container is detected or the context is canceled
func (r *controller) checkHealth(ctx context.Context) error {
eventq := r.adapter.events(ctx)
for {
select {
case <-ctx.Done():
return nil
case <-r.closed:
return nil
case event := <-eventq:
if !r.matchevent(event) {
continue
}
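// only an unhealthy health_status event is treated as a failure; every
// other container event is ignored.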
switch event.Action {
case "health_status: unhealthy":
return ErrContainerUnhealthy
}
}
}
}

View file

@ -0,0 +1,17 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"errors"
)
var (
// ErrImageRequired is returned if a task is missing the image definition.
ErrImageRequired = errors.New("dockerexec: image required")
// ErrContainerDestroyed is returned when a container is prematurely destroyed
// during a wait call.
ErrContainerDestroyed = errors.New("dockerexec: container destroyed")
// ErrContainerUnhealthy is returned if the controller detects a health check failure.
ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container")
)

View file

@ -0,0 +1,282 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/template"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info, err := e.backend.SystemInfo()
if err != nil {
return nil, err
}
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
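// Map v2 plugin capability names onto the same type names used for the v1 plugins above.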
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
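// If no ingress network was found in the attachment list, fall back to the
// node's single Attachment field.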
if (ingressNA == nil) && (node.Attachment != nil) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
info, _ := e.backend.SystemInfo()
if !info.ExperimentalBuild {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}

View file

@ -0,0 +1,100 @@
// +build !windows
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"testing"
"time"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/events"
"github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
func TestHealthStates(t *testing.T) {
// set up the test environment: events, task, container, etc.
e := events.New()
_, l, _ := e.Subscribe()
defer e.Evict(l)
task := &api.Task{
ID: "id",
ServiceID: "sid",
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "image_name",
Labels: map[string]string{
"com.docker.swarm.task.id": "id",
},
},
},
},
Annotations: api.Annotations{Name: "name"},
}
c := &container.Container{
ID: "id",
Name: "name",
Config: &containertypes.Config{
Image: "image_name",
Labels: map[string]string{
"com.docker.swarm.task.id": "id",
},
},
}
daemon := &daemon.Daemon{
EventsService: e,
}
controller, err := newController(daemon, nil, task, nil, nil)
if err != nil {
t.Fatalf("create controller fail %v", err)
}
errChan := make(chan error, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// fire checkHealth
go func() {
err := controller.checkHealth(ctx)
select {
case errChan <- err:
case <-ctx.Done():
}
}()
// send an event and expect to get expectedErr
// if expectedErr is nil, shouldn't get any error
logAndExpect := func(msg string, expectedErr error) {
daemon.LogContainerEvent(c, msg)
timer := time.NewTimer(1 * time.Second)
defer timer.Stop()
select {
case err := <-errChan:
if err != expectedErr {
t.Fatalf("expect error %v, but get %v", expectedErr, err)
}
case <-timer.C:
if expectedErr != nil {
t.Fatal("time limit exceeded, didn't get expected error")
}
}
}
// events that are ignored by checkHealth
logAndExpect("health_status: running", nil)
logAndExpect("health_status: healthy", nil)
logAndExpect("die", nil)
// unhealthy event will be caught by checkHealth
logAndExpect("health_status: unhealthy", ErrContainerUnhealthy)
}

View file

@ -0,0 +1,40 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"errors"
"fmt"
"path/filepath"
"github.com/docker/swarmkit/api"
)
func validateMounts(mounts []api.Mount) error {
for _, mount := range mounts {
// Target must always be absolute
if !filepath.IsAbs(mount.Target) {
return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target)
}
switch mount.Type {
// The checks on absolute paths are required because the container API confuses
// volume mounts with bind mounts when the source is absolute (and vice versa).
// See #25253
// TODO: This is probably not necessary once #22373 is merged
case api.MountTypeBind:
if !filepath.IsAbs(mount.Source) {
return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source)
}
case api.MountTypeVolume:
if filepath.IsAbs(mount.Source) {
return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source)
}
case api.MountTypeTmpfs:
if mount.Source != "" {
return errors.New("invalid tmpfs source, source must be empty")
}
default:
return fmt.Errorf("invalid mount type: %s", mount.Type)
}
}
return nil
}

View file

@ -0,0 +1,142 @@
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/swarmkit/api"
)
func newTestControllerWithMount(m api.Mount) (*controller, error) {
return newController(&daemon.Daemon{}, nil, &api.Task{
ID: stringid.GenerateRandomID(),
ServiceID: stringid.GenerateRandomID(),
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "image_name",
Labels: map[string]string{
"com.docker.swarm.task.id": "id",
},
Mounts: []api.Mount{m},
},
},
},
}, nil, nil)
}
func TestControllerValidateMountBind(t *testing.T) {
// with improper source
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeBind,
Source: "foo",
Target: testAbsPath,
}); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") {
t.Fatalf("expected error, got: %v", err)
}
// with non-existing source
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeBind,
Source: testAbsNonExistent,
Target: testAbsPath,
}); err != nil {
t.Fatalf("controller should not error at creation: %v", err)
}
// with proper source
tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.Remove(tmpdir)
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeBind,
Source: tmpdir,
Target: testAbsPath,
}); err != nil {
t.Fatalf("expected error, got: %v", err)
}
}
func TestControllerValidateMountVolume(t *testing.T) {
// with improper source
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeVolume,
Source: testAbsPath,
Target: testAbsPath,
}); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") {
t.Fatalf("expected error, got: %v", err)
}
// with proper source
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeVolume,
Source: "foo",
Target: testAbsPath,
}); err != nil {
t.Fatalf("expected error, got: %v", err)
}
}
func TestControllerValidateMountTarget(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.Remove(tmpdir)
// with improper target
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeBind,
Source: testAbsPath,
Target: "foo",
}); err == nil || !strings.Contains(err.Error(), "invalid mount target") {
t.Fatalf("expected error, got: %v", err)
}
// with proper target
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeBind,
Source: tmpdir,
Target: testAbsPath,
}); err != nil {
t.Fatalf("expected no error, got: %v", err)
}
}
func TestControllerValidateMountTmpfs(t *testing.T) {
// with improper target
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeTmpfs,
Source: "foo",
Target: testAbsPath,
}); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") {
t.Fatalf("expected error, got: %v", err)
}
// with proper target
if _, err := newTestControllerWithMount(api.Mount{
Type: api.MountTypeTmpfs,
Target: testAbsPath,
}); err != nil {
t.Fatalf("expected no error, got: %v", err)
}
}
func TestControllerValidateMountInvalidType(t *testing.T) {
// with improper target
if _, err := newTestControllerWithMount(api.Mount{
Type: api.Mount_MountType(9999),
Source: "foo",
Target: testAbsPath,
}); err == nil || !strings.Contains(err.Error(), "invalid mount type") {
t.Fatalf("expected error, got: %v", err)
}
}

View file

@ -0,0 +1,8 @@
// +build !windows
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
const (
testAbsPath = "/foo"
testAbsNonExistent = "/some-non-existing-host-path/"
)

View file

@ -0,0 +1,8 @@
// +build windows
package container // import "github.com/docker/docker/daemon/cluster/executor/container"
const (
testAbsPath = `c:\foo`
testAbsNonExistent = `c:\some-non-existing-host-path\`
)

View file

@ -0,0 +1,123 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
"strings"
"github.com/docker/docker/api/types/filters"
runconfigopts "github.com/docker/docker/runconfig/opts"
swarmapi "github.com/docker/swarmkit/api"
)
func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
"role": true,
"membership": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
f := &swarmapi.ListNodesRequest_Filters{
NamePrefixes: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
}
for _, r := range filter.Get("role") {
if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
f.Roles = append(f.Roles, swarmapi.NodeRole(role))
} else if r != "" {
return nil, fmt.Errorf("Invalid role filter: '%s'", r)
}
}
for _, a := range filter.Get("membership") {
if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
} else if a != "" {
return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
}
}
return f, nil
}
func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
"service": true,
"node": true,
"desired-state": true,
// UpToDate is not meant to be exposed to users. It's for
// internal use in checking create/update progress. Therefore,
// we prefix it with a '_'.
"_up-to-date": true,
"runtime": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
if transformFunc != nil {
if err := transformFunc(filter); err != nil {
return nil, err
}
}
f := &swarmapi.ListTasksRequest_Filters{
NamePrefixes: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
ServiceIDs: filter.Get("service"),
NodeIDs: filter.Get("node"),
UpToDate: len(filter.Get("_up-to-date")) != 0,
Runtimes: filter.Get("runtime"),
}
for _, s := range filter.Get("desired-state") {
if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
} else if s != "" {
return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s)
}
}
return f, nil
}
func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) {
accepted := map[string]bool{
"names": true,
"name": true,
"id": true,
"label": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
return &swarmapi.ListSecretsRequest_Filters{
Names: filter.Get("names"),
NamePrefixes: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
}, nil
}
func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) {
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
}
return &swarmapi.ListConfigsRequest_Filters{
NamePrefixes: filter.Get("name"),
IDPrefixes: filter.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
}, nil
}

View file

@ -0,0 +1,102 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"testing"
"github.com/docker/docker/api/types/filters"
)
func TestNewListSecretsFilters(t *testing.T) {
validNameFilter := filters.NewArgs()
validNameFilter.Add("name", "test_name")
validIDFilter := filters.NewArgs()
validIDFilter.Add("id", "7c9009d6720f6de3b492f5")
validLabelFilter := filters.NewArgs()
validLabelFilter.Add("label", "type=test")
validLabelFilter.Add("label", "storage=ssd")
validLabelFilter.Add("label", "memory")
validNamesFilter := filters.NewArgs()
validNamesFilter.Add("names", "test_name")
validAllFilter := filters.NewArgs()
validAllFilter.Add("name", "nodeName")
validAllFilter.Add("id", "7c9009d6720f6de3b492f5")
validAllFilter.Add("label", "type=test")
validAllFilter.Add("label", "memory")
validAllFilter.Add("names", "test_name")
validFilters := []filters.Args{
validNameFilter,
validIDFilter,
validLabelFilter,
validNamesFilter,
validAllFilter,
}
invalidTypeFilter := filters.NewArgs()
invalidTypeFilter.Add("nonexist", "aaaa")
invalidFilters := []filters.Args{
invalidTypeFilter,
}
for _, filter := range validFilters {
if _, err := newListSecretsFilters(filter); err != nil {
t.Fatalf("Should get no error, got %v", err)
}
}
for _, filter := range invalidFilters {
if _, err := newListSecretsFilters(filter); err == nil {
t.Fatalf("Should get an error for filter %v, while got nil", filter)
}
}
}
func TestNewListConfigsFilters(t *testing.T) {
validNameFilter := filters.NewArgs()
validNameFilter.Add("name", "test_name")
validIDFilter := filters.NewArgs()
validIDFilter.Add("id", "7c9009d6720f6de3b492f5")
validLabelFilter := filters.NewArgs()
validLabelFilter.Add("label", "type=test")
validLabelFilter.Add("label", "storage=ssd")
validLabelFilter.Add("label", "memory")
validAllFilter := filters.NewArgs()
validAllFilter.Add("name", "nodeName")
validAllFilter.Add("id", "7c9009d6720f6de3b492f5")
validAllFilter.Add("label", "type=test")
validAllFilter.Add("label", "memory")
validFilters := []filters.Args{
validNameFilter,
validIDFilter,
validLabelFilter,
validAllFilter,
}
invalidTypeFilter := filters.NewArgs()
invalidTypeFilter.Add("nonexist", "aaaa")
invalidFilters := []filters.Args{
invalidTypeFilter,
}
for _, filter := range validFilters {
if _, err := newListConfigsFilters(filter); err != nil {
t.Fatalf("Should get no error, got %v", err)
}
}
for _, filter := range invalidFilters {
if _, err := newListConfigsFilters(filter); err == nil {
t.Fatalf("Should get an error for filter %v, while got nil", filter)
}
}
}

View file

@ -0,0 +1,246 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
"github.com/docker/docker/errdefs"
swarmapi "github.com/docker/swarmkit/api"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) {
rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{})
if err != nil {
return nil, err
}
if len(rl.Clusters) == 0 {
return nil, errors.WithStack(errNoSwarm)
}
// TODO: assume one cluster only
return rl.Clusters[0], nil
}
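// getNode resolves a node reference by trying, in order: full ID, exact name,
// and finally ID prefix; ambiguous prefix matches are rejected.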
func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) {
// GetNode to match via full ID.
if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil {
return rg.Node, nil
}
// If any error (including NotFound), ListNodes to match via full name.
rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{
Filters: &swarmapi.ListNodesRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Nodes) == 0 {
// If any error or 0 result, ListNodes to match via ID prefix.
rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{
Filters: &swarmapi.ListNodesRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Nodes) == 0 {
err := fmt.Errorf("node %s not found", input)
return nil, errdefs.NotFound(err)
}
if l := len(rl.Nodes); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("node %s is ambiguous (%d matches found)", input, l))
}
return rl.Nodes[0], nil
}
func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) {
// GetService to match via full ID.
if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil {
return rg.Service, nil
}
// If any error (including NotFound), ListServices to match via full name.
rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{
Filters: &swarmapi.ListServicesRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Services) == 0 {
// If any error or 0 result, ListServices to match via ID prefix.
rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{
Filters: &swarmapi.ListServicesRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Services) == 0 {
err := fmt.Errorf("service %s not found", input)
return nil, errdefs.NotFound(err)
}
if l := len(rl.Services); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("service %s is ambiguous (%d matches found)", input, l))
}
if !insertDefaults {
return rl.Services[0], nil
}
rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true})
if err == nil {
return rg.Service, nil
}
return nil, err
}
func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) {
// GetTask to match via full ID.
if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil {
return rg.Task, nil
}
// If any error (including NotFound), ListTasks to match via full name.
rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{
Filters: &swarmapi.ListTasksRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Tasks) == 0 {
// If any error or 0 result, ListTasks to match via ID prefix.
rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{
Filters: &swarmapi.ListTasksRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Tasks) == 0 {
err := fmt.Errorf("task %s not found", input)
return nil, errdefs.NotFound(err)
}
if l := len(rl.Tasks); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("task %s is ambiguous (%d matches found)", input, l))
}
return rl.Tasks[0], nil
}
func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) {
// attempt to lookup secret by full ID
if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil {
return rg.Secret, nil
}
// If any error (including NotFound), ListSecrets to match via full name.
rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{
Filters: &swarmapi.ListSecretsRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Secrets) == 0 {
// If any error or 0 result, ListSecrets to match via ID prefix.
rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{
Filters: &swarmapi.ListSecretsRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Secrets) == 0 {
err := fmt.Errorf("secret %s not found", input)
return nil, errdefs.NotFound(err)
}
if l := len(rl.Secrets); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l))
}
return rl.Secrets[0], nil
}
func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) {
// attempt to lookup config by full ID
if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil {
return rg.Config, nil
}
// If any error (including NotFound), ListConfigs to match via full name.
rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{
Filters: &swarmapi.ListConfigsRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Configs) == 0 {
// If any error or 0 result, ListConfigs to match via ID prefix.
rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{
Filters: &swarmapi.ListConfigsRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Configs) == 0 {
err := fmt.Errorf("config %s not found", input)
return nil, errdefs.NotFound(err)
}
if l := len(rl.Configs); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("config %s is ambiguous (%d matches found)", input, l))
}
return rl.Configs[0], nil
}
func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
// GetNetwork to match via full ID.
if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil {
return rg.Network, nil
}
// If any error (including NotFound), ListNetworks to match via ID prefix and full name.
rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{
Filters: &swarmapi.ListNetworksRequest_Filters{
Names: []string{input},
},
})
if err != nil || len(rl.Networks) == 0 {
rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{
Filters: &swarmapi.ListNetworksRequest_Filters{
IDPrefixes: []string{input},
},
})
}
if err != nil {
return nil, err
}
if len(rl.Networks) == 0 {
return nil, fmt.Errorf("network %s not found", input)
}
if l := len(rl.Networks); l > 1 {
return nil, errdefs.InvalidParameter(fmt.Errorf("network %s is ambiguous (%d matches found)", input, l))
}
return rl.Networks[0], nil
}

View file

@ -0,0 +1,301 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
"net"
)
const (
errNoSuchInterface configError = "no such interface"
errNoIP configError = "could not find the system's IP address"
errMustSpecifyListenAddr configError = "must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified"
errBadNetworkIdentifier configError = "must specify a valid IP address or interface name"
errBadListenAddr configError = "listen address must be an IP address or network interface (with optional port number)"
errBadAdvertiseAddr configError = "advertise address must be a non-zero IP address or network interface (with optional port number)"
errBadDataPathAddr configError = "data path address must be a non-zero IP address or network interface (without a port number)"
errBadDefaultAdvertiseAddr configError = "default advertise address must be a non-zero IP address or network interface (without a port number)"
)
func resolveListenAddr(specifiedAddr string) (string, string, error) {
specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr)
if err != nil {
return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr)
}
// Does the host component match any of the interface names on the
// system? If so, use the address from that interface.
specifiedIP, err := resolveInputIPAddr(specifiedHost, true)
if err != nil {
if err == errBadNetworkIdentifier {
err = errBadListenAddr
}
return "", "", err
}
return specifiedIP.String(), specifiedPort, nil
}
func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) {
// Approach:
// - If an advertise address is specified, use that. Resolve the
// interface's address if an interface was specified in
// advertiseAddr. Fill in the port from listenAddrPort if necessary.
// - If DefaultAdvertiseAddr is not empty, use that with the port from
// listenAddrPort. Resolve the interface's address from
// if an interface name was specified in DefaultAdvertiseAddr.
// - Otherwise, try to autodetect the system's address. Use the port in
// listenAddrPort with this address if autodetection succeeds.
if advertiseAddr != "" {
advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr)
if err != nil {
// Not a host:port specification
advertiseHost = advertiseAddr
advertisePort = listenAddrPort
}
// Does the host component match any of the interface names on the
// system? If so, use the address from that interface.
advertiseIP, err := resolveInputIPAddr(advertiseHost, false)
if err != nil {
if err == errBadNetworkIdentifier {
err = errBadAdvertiseAddr
}
return "", "", err
}
return advertiseIP.String(), advertisePort, nil
}
if c.config.DefaultAdvertiseAddr != "" {
// Does the default advertise address component match any of the
// interface names on the system? If so, use the address from
// that interface.
defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false)
if err != nil {
if err == errBadNetworkIdentifier {
err = errBadDefaultAdvertiseAddr
}
return "", "", err
}
return defaultAdvertiseIP.String(), listenAddrPort, nil
}
systemAddr, err := c.resolveSystemAddr()
if err != nil {
return "", "", err
}
return systemAddr.String(), listenAddrPort, nil
}
func resolveDataPathAddr(dataPathAddr string) (string, error) {
if dataPathAddr == "" {
// dataPathAddr is not defined
return "", nil
}
// If a data path flag is specified try to resolve the IP address.
dataPathIP, err := resolveInputIPAddr(dataPathAddr, false)
if err != nil {
if err == errBadNetworkIdentifier {
err = errBadDataPathAddr
}
return "", err
}
return dataPathIP.String(), nil
}
func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) {
// Use a specific interface's IP address.
intf, err := net.InterfaceByName(specifiedInterface)
if err != nil {
return nil, errNoSuchInterface
}
addrs, err := intf.Addrs()
if err != nil {
return nil, err
}
var interfaceAddr4, interfaceAddr6 net.IP
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
if ok {
if ipAddr.IP.To4() != nil {
// IPv4
if interfaceAddr4 != nil {
return nil, configError(fmt.Sprintf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP))
}
interfaceAddr4 = ipAddr.IP
} else {
// IPv6
if interfaceAddr6 != nil {
return nil, configError(fmt.Sprintf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP))
}
interfaceAddr6 = ipAddr.IP
}
}
}
if interfaceAddr4 == nil && interfaceAddr6 == nil {
return nil, configError(fmt.Sprintf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface))
}
// In the case that there's exactly one IPv4 address
// and exactly one IPv6 address, favor IPv4 over IPv6.
if interfaceAddr4 != nil {
return interfaceAddr4, nil
}
return interfaceAddr6, nil
}
// resolveInputIPAddr tries to resolve an IP address from the input string:
// - it first tries to match the string to an interface name and, if successful,
//   returns the IP address associated with that interface
// - otherwise it tries to parse the string as an IP address itself and, if valid,
//   returns that address
func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) {
// Try to see if it is an interface name
interfaceAddr, err := resolveInterfaceAddr(input)
if err == nil {
return interfaceAddr, nil
}
// The string matched an interface name, but a single usable address could not be resolved from it
if err != errNoSuchInterface {
return nil, err
}
// The string is not an interface name; check whether it is a valid IP address
if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) {
return ip, nil
}
// No valid IP address found
return nil, errBadNetworkIdentifier
}
func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) {
// Use the system's only IP address, or fail if there are
// multiple addresses to choose from. Skip interfaces which
// are managed by docker via subnet check.
interfaces, err := net.Interfaces()
if err != nil {
return nil, err
}
var systemAddr net.IP
var systemInterface string
// List Docker-managed subnets
v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets()
ifaceLoop:
for _, intf := range interfaces {
// Skip inactive interfaces and loopback interfaces
if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 {
continue
}
addrs, err := intf.Addrs()
if err != nil {
continue
}
var interfaceAddr4, interfaceAddr6 net.IP
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
// Skip loopback and link-local addresses
if !ok || !ipAddr.IP.IsGlobalUnicast() {
continue
}
if ipAddr.IP.To4() != nil {
// IPv4
// Ignore addresses in subnets that are managed by Docker.
for _, subnet := range v4Subnets {
if subnet.Contains(ipAddr.IP) {
continue ifaceLoop
}
}
if interfaceAddr4 != nil {
return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP)
}
interfaceAddr4 = ipAddr.IP
} else {
// IPv6
// Ignore addresses in subnets that are managed by Docker.
for _, subnet := range v6Subnets {
if subnet.Contains(ipAddr.IP) {
continue ifaceLoop
}
}
if interfaceAddr6 != nil {
return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP)
}
interfaceAddr6 = ipAddr.IP
}
}
// In the case that this interface has exactly one IPv4 address
// and exactly one IPv6 address, favor IPv4 over IPv6.
if interfaceAddr4 != nil {
if systemAddr != nil {
return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4)
}
systemAddr = interfaceAddr4
systemInterface = intf.Name
} else if interfaceAddr6 != nil {
if systemAddr != nil {
return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6)
}
systemAddr = interfaceAddr6
systemInterface = intf.Name
}
}
if systemAddr == nil {
return nil, errNoIP
}
return systemAddr, nil
}
func listSystemIPs() []net.IP {
interfaces, err := net.Interfaces()
if err != nil {
return nil
}
var systemAddrs []net.IP
for _, intf := range interfaces {
addrs, err := intf.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
if ok {
systemAddrs = append(systemAddrs, ipAddr.IP)
}
}
}
return systemAddrs
}
func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error {
if interfaceA == interfaceB {
return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB))
}
return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB))
}

View file

@ -0,0 +1,89 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"net"
"github.com/vishvananda/netlink"
)
func (c *Cluster) resolveSystemAddr() (net.IP, error) {
// Use the system's only device IP address, or fail if there are
// multiple addresses to choose from.
interfaces, err := netlink.LinkList()
if err != nil {
return nil, err
}
var (
systemAddr net.IP
systemInterface string
deviceFound bool
)
for _, intf := range interfaces {
// Skip non device or inactive interfaces
if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 {
continue
}
addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL)
if err != nil {
continue
}
var interfaceAddr4, interfaceAddr6 net.IP
for _, addr := range addrs {
ipAddr := addr.IPNet.IP
// Skip loopback and link-local addresses
if !ipAddr.IsGlobalUnicast() {
continue
}
// At least one non-loopback device was found and it is administratively up
deviceFound = true
if ipAddr.To4() != nil {
if interfaceAddr4 != nil {
return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr)
}
interfaceAddr4 = ipAddr
} else {
if interfaceAddr6 != nil {
return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr)
}
interfaceAddr6 = ipAddr
}
}
// In the case that this interface has exactly one IPv4 address
// and exactly one IPv6 address, favor IPv4 over IPv6.
if interfaceAddr4 != nil {
if systemAddr != nil {
return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4)
}
systemAddr = interfaceAddr4
systemInterface = intf.Attrs().Name
} else if interfaceAddr6 != nil {
if systemAddr != nil {
return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6)
}
systemAddr = interfaceAddr6
systemInterface = intf.Attrs().Name
}
}
if systemAddr == nil {
if !deviceFound {
// If no non-loopback device type interface is found,
// fall back to the regular auto-detection mechanism.
// This is to cover the case where docker is running
// inside a container (eths are in fact veths).
return c.resolveSystemAddrViaSubnetCheck()
}
return nil, errNoIP
}
return systemAddr, nil
}

View file

@ -0,0 +1,9 @@
// +build !linux
package cluster // import "github.com/docker/docker/daemon/cluster"
import "net"
func (c *Cluster) resolveSystemAddr() (net.IP, error) {
return c.resolveSystemAddrViaSubnetCheck()
}

View file

@ -0,0 +1,316 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/runconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// GetNetworks returns all current cluster managed networks.
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
list, err := c.getNetworks(nil)
if err != nil {
return nil, err
}
removePredefinedNetworks(&list)
return list, nil
}
func removePredefinedNetworks(networks *[]apitypes.NetworkResource) {
if networks == nil {
return
}
var idxs []int
for i, n := range *networks {
if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" {
idxs = append(idxs, i)
}
}
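// idxs was collected in ascending order; each removal shifts the remaining
// entries left by one, so offset every index by the number already removed.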
for i, idx := range idxs {
idx -= i
*networks = append((*networks)[:idx], (*networks)[idx+1:]...)
}
}
func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters})
if err != nil {
return nil, err
}
networks := make([]apitypes.NetworkResource, 0, len(r.Networks))
for _, network := range r.Networks {
networks = append(networks, convert.BasicNetworkFromGRPC(*network))
}
return networks, nil
}
// GetNetwork returns a cluster network by an ID.
func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
var network *swarmapi.Network
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
n, err := getNetwork(ctx, state.controlClient, input)
if err != nil {
return err
}
network = n
return nil
}); err != nil {
return apitypes.NetworkResource{}, err
}
return convert.BasicNetworkFromGRPC(*network), nil
}
// GetNetworksByName returns cluster managed networks by name.
// It is ok to have multiple networks here. #18864
func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) {
// Note that swarmapi.GetNetworkRequest.Name is not functional.
// So we cannot just use that with c.GetNetwork.
return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{
Names: []string{name},
})
}
func attacherKey(target, containerID string) string {
return containerID + ":" + target
}
// UpdateAttachment signals the attachment config to the attachment
// waiter who is trying to start or attach the container to the
// network.
func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error {
c.mu.Lock()
attacher, ok := c.attachers[attacherKey(target, containerID)]
if !ok || attacher == nil {
c.mu.Unlock()
return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
}
if attacher.inProgress {
logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID)
c.mu.Unlock()
return nil
}
attacher.inProgress = true
c.mu.Unlock()
attacher.attachWaitCh <- config
return nil
}
// WaitForDetachment waits for the container to stop or detach from
// the network.
func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
c.mu.RLock()
attacher, ok := c.attachers[attacherKey(networkName, containerID)]
if !ok {
attacher, ok = c.attachers[attacherKey(networkID, containerID)]
}
state := c.currentNodeState()
if state.swarmNode == nil || state.swarmNode.Agent() == nil {
c.mu.RUnlock()
return errors.New("invalid cluster node while waiting for detachment")
}
c.mu.RUnlock()
agent := state.swarmNode.Agent()
if ok && attacher != nil &&
attacher.detachWaitCh != nil &&
attacher.attachCompleteCh != nil {
// Attachment may be in progress still so wait for
// attachment to complete.
select {
case <-attacher.attachCompleteCh:
case <-ctx.Done():
return ctx.Err()
}
if attacher.taskID == taskID {
select {
case <-attacher.detachWaitCh:
case <-ctx.Done():
return ctx.Err()
}
}
}
return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
}
// AttachNetwork generates an attachment request towards the manager.
func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) {
aKey := attacherKey(target, containerID)
c.mu.Lock()
state := c.currentNodeState()
if state.swarmNode == nil || state.swarmNode.Agent() == nil {
c.mu.Unlock()
return nil, errors.New("invalid cluster node while attaching to network")
}
if attacher, ok := c.attachers[aKey]; ok {
c.mu.Unlock()
return attacher.config, nil
}
agent := state.swarmNode.Agent()
attachWaitCh := make(chan *network.NetworkingConfig)
detachWaitCh := make(chan struct{})
attachCompleteCh := make(chan struct{})
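// attachWaitCh receives the allocated network config from UpdateAttachment,
// attachCompleteCh is closed once a task ID has been assigned, and
// detachWaitCh is closed by DetachNetwork to unblock WaitForDetachment.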
c.attachers[aKey] = &attacher{
attachWaitCh: attachWaitCh,
attachCompleteCh: attachCompleteCh,
detachWaitCh: detachWaitCh,
}
c.mu.Unlock()
ctx, cancel := c.getRequestContext()
defer cancel()
taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses)
if err != nil {
c.mu.Lock()
delete(c.attachers, aKey)
c.mu.Unlock()
return nil, fmt.Errorf("Could not attach to network %s: %v", target, err)
}
c.mu.Lock()
c.attachers[aKey].taskID = taskID
close(attachCompleteCh)
c.mu.Unlock()
logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID)
release := func() {
ctx, cancel := c.getRequestContext()
defer cancel()
if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil {
logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v",
taskID, target, err)
}
}
var config *network.NetworkingConfig
select {
case config = <-attachWaitCh:
case <-ctx.Done():
release()
return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err())
}
c.mu.Lock()
c.attachers[aKey].config = config
c.mu.Unlock()
logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID)
return config, nil
}
// DetachNetwork unblocks the waiters waiting on WaitForDetachment so
// that a request to detach can be generated towards the manager.
func (c *Cluster) DetachNetwork(target string, containerID string) error {
aKey := attacherKey(target, containerID)
c.mu.Lock()
attacher, ok := c.attachers[aKey]
delete(c.attachers, aKey)
c.mu.Unlock()
if !ok {
return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target)
}
close(attacher.detachWaitCh)
return nil
}
// CreateNetwork creates a new cluster managed network.
func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
if runconfig.IsPreDefinedNetwork(s.Name) {
err := notAllowedError(fmt.Sprintf("%s is a pre-defined network and cannot be created", s.Name))
return "", errors.WithStack(err)
}
var resp *swarmapi.CreateNetworkResponse
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
networkSpec := convert.BasicNetworkCreateToGRPC(s)
r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
if err != nil {
return err
}
resp = r
return nil
}); err != nil {
return "", err
}
return resp.Network.ID, nil
}
// RemoveNetwork removes a cluster network.
func (c *Cluster) RemoveNetwork(input string) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
network, err := getNetwork(ctx, state.controlClient, input)
if err != nil {
return err
}
_, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID})
return err
})
}
func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error {
// Always prefer NetworkAttachmentConfigs from TaskTemplate
// but fallback to service spec for backward compatibility
networks := s.TaskTemplate.Networks
if len(networks) == 0 {
networks = s.Networks
}
for i, n := range networks {
apiNetwork, err := getNetwork(ctx, client, n.Target)
if err != nil {
ln, _ := c.config.Backend.FindNetwork(n.Target)
if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) {
// Need to retrieve the corresponding predefined swarm network
// and use its id for the request.
apiNetwork, err = getNetwork(ctx, client, ln.Name())
if err != nil {
return errors.Wrap(errdefs.NotFound(err), "could not find the corresponding predefined swarm network")
}
goto setid
}
if ln != nil && !ln.Info().Dynamic() {
errMsg := fmt.Sprintf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name())
return errors.WithStack(notAllowedError(errMsg))
}
return err
}
setid:
networks[i].Target = apiNetwork.ID
}
return nil
}

View file

@ -0,0 +1,383 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/executor/container"
lncluster "github.com/docker/libnetwork/cluster"
swarmapi "github.com/docker/swarmkit/api"
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// nodeRunner implements a manager for a continuously running swarmkit node, restarting it with backoff delays if needed.
type nodeRunner struct {
nodeState
mu sync.RWMutex
done chan struct{} // closed when swarmNode exits
ready chan struct{} // closed when swarmNode becomes active
reconnectDelay time.Duration
config nodeStartConfig
repeatedRun bool
cancelReconnect func()
stopping bool
cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct
}
// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
// LocalAddr is this machine's local IP or hostname, if specified.
LocalAddr string
// RemoteAddr is the address that was given to "swarm join". It is used
// to find LocalAddr if necessary.
RemoteAddr string
// ListenAddr is the address we bind to, including a port.
ListenAddr string
// AdvertiseAddr is the address other nodes should connect to,
// including a port.
AdvertiseAddr string
// DataPathAddr is the address that has to be used for the data path
DataPathAddr string
// JoinInProgress is set to true if a join operation has started, but
// not completed yet.
JoinInProgress bool
joinAddr string
forceNewCluster bool
joinToken string
lockKey []byte
autolock bool
availability types.NodeAvailability
}
func (n *nodeRunner) Ready() chan error {
c := make(chan error, 1)
n.mu.RLock()
ready, done := n.ready, n.done
n.mu.RUnlock()
go func() {
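// Wait until the node becomes ready or exits, then report the stored error
// only if it exited before ever becoming ready.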
select {
case <-ready:
case <-done:
}
select {
case <-ready:
default:
n.mu.RLock()
c <- n.err
n.mu.RUnlock()
}
close(c)
}()
return c
}
func (n *nodeRunner) Start(conf nodeStartConfig) error {
n.mu.Lock()
defer n.mu.Unlock()
n.reconnectDelay = initialReconnectDelay
return n.start(conf)
}
func (n *nodeRunner) start(conf nodeStartConfig) error {
var control string
if runtime.GOOS == "windows" {
control = `\\.\pipe\` + controlSocket
} else {
control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
}
joinAddr := conf.joinAddr
if joinAddr == "" && conf.JoinInProgress {
// We must have been restarted while trying to join a cluster.
// Continue trying to join instead of forming our own cluster.
joinAddr = conf.RemoteAddr
}
// Hostname is not set here. Instead, it is obtained from
// the node description that is reported periodically
swarmnodeConfig := swarmnode.Config{
ForceNewCluster: conf.forceNewCluster,
ListenControlAPI: control,
ListenRemoteAPI: conf.ListenAddr,
AdvertiseRemoteAPI: conf.AdvertiseAddr,
JoinAddr: joinAddr,
StateDir: n.cluster.root,
JoinToken: conf.joinToken,
Executor: container.NewExecutor(
n.cluster.config.Backend,
n.cluster.config.PluginBackend,
n.cluster.config.ImageBackend),
HeartbeatTick: 1,
ElectionTick: 3,
UnlockKey: conf.lockKey,
AutoLockManagers: conf.autolock,
PluginGetter: n.cluster.config.Backend.PluginGetter(),
}
if conf.availability != "" {
avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
if !ok {
return fmt.Errorf("invalid Availability: %q", conf.availability)
}
swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
}
node, err := swarmnode.New(&swarmnodeConfig)
if err != nil {
return err
}
if err := node.Start(context.Background()); err != nil {
return err
}
n.done = make(chan struct{})
n.ready = make(chan struct{})
n.swarmNode = node
if conf.joinAddr != "" {
conf.JoinInProgress = true
}
n.config = conf
savePersistentState(n.cluster.root, conf)
ctx, cancel := context.WithCancel(context.Background())
go func() {
n.handleNodeExit(node)
cancel()
}()
go n.handleReadyEvent(ctx, node, n.ready)
go n.handleControlSocketChange(ctx, node)
return nil
}
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
for conn := range node.ListenControlSocket(ctx) {
n.mu.Lock()
if n.grpcConn != conn {
if conn == nil {
n.controlClient = nil
n.logsClient = nil
} else {
n.controlClient = swarmapi.NewControlClient(conn)
n.logsClient = swarmapi.NewLogsClient(conn)
// push store changes to daemon
go n.watchClusterEvents(ctx, conn)
}
}
n.grpcConn = conn
n.mu.Unlock()
n.cluster.SendClusterEvent(lncluster.EventSocketChange)
}
}
func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) {
client := swarmapi.NewWatchClient(conn)
watch, err := client.Watch(ctx, &swarmapi.WatchRequest{
Entries: []*swarmapi.WatchRequest_WatchEntry{
{
Kind: "node",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "service",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "network",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "secret",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "config",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
},
IncludeOldObject: true,
})
if err != nil {
logrus.WithError(err).Error("failed to watch cluster store")
return
}
for {
msg, err := watch.Recv()
if err != nil {
// store watch is broken
errStatus, ok := status.FromError(err)
if !ok || errStatus.Code() != codes.Canceled {
logrus.WithError(err).Error("failed to receive changes from store watch API")
}
return
}
select {
case <-ctx.Done():
return
case n.cluster.watchStream <- msg:
}
}
}
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
select {
case <-node.Ready():
n.mu.Lock()
n.err = nil
if n.config.JoinInProgress {
n.config.JoinInProgress = false
savePersistentState(n.cluster.root, n.config)
}
n.mu.Unlock()
close(ready)
case <-ctx.Done():
}
n.cluster.SendClusterEvent(lncluster.EventNodeReady)
}
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
err := detectLockedError(node.Err(context.Background()))
if err != nil {
logrus.Errorf("cluster exited with error: %v", err)
}
n.mu.Lock()
n.swarmNode = nil
n.err = err
close(n.done)
select {
case <-n.ready:
n.enableReconnectWatcher()
default:
if n.repeatedRun {
n.enableReconnectWatcher()
}
}
n.repeatedRun = true
n.mu.Unlock()
}
// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
n.mu.Lock()
if n.cancelReconnect != nil { // between restarts
n.cancelReconnect()
n.cancelReconnect = nil
}
if n.swarmNode == nil {
n.mu.Unlock()
return nil
}
n.stopping = true
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
n.mu.Unlock()
if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
return err
}
n.cluster.SendClusterEvent(lncluster.EventNodeLeave)
<-n.done
return nil
}
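// State returns a snapshot of the runner's node state, deriving the local node
// status (inactive, locked, error, active, or pending) from the stored error,
// any pending reconnect, and whether the ready channel has been closed.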
func (n *nodeRunner) State() nodeState {
if n == nil {
return nodeState{status: types.LocalNodeStateInactive}
}
n.mu.RLock()
defer n.mu.RUnlock()
ns := n.nodeState
if ns.err != nil || n.cancelReconnect != nil {
if errors.Cause(ns.err) == errSwarmLocked {
ns.status = types.LocalNodeStateLocked
} else {
ns.status = types.LocalNodeStateError
}
} else {
select {
case <-n.ready:
ns.status = types.LocalNodeStateActive
default:
ns.status = types.LocalNodeStatePending
}
}
return ns
}
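// enableReconnectWatcher schedules a restart of the swarm node with the saved
// configuration, doubling the reconnect delay up to maxReconnectDelay on each
// attempt. The restart is skipped if Stop has been called in the meantime.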
func (n *nodeRunner) enableReconnectWatcher() {
if n.stopping {
return
}
n.reconnectDelay *= 2
if n.reconnectDelay > maxReconnectDelay {
n.reconnectDelay = maxReconnectDelay
}
logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
n.cancelReconnect = cancel
go func() {
<-delayCtx.Done()
if delayCtx.Err() != context.DeadlineExceeded {
return
}
n.mu.Lock()
defer n.mu.Unlock()
if n.stopping {
return
}
if err := n.start(n.config); err != nil {
n.err = err
}
}()
}
// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
swarmNode *swarmnode.Node
grpcConn *grpc.ClientConn
controlClient swarmapi.ControlClient
logsClient swarmapi.LogsClient
status types.LocalNodeState
actualLocalAddr string
err error
}
// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true.
func (ns nodeState) IsActiveManager() bool {
return ns.controlClient != nil
}
// IsManager returns true if node is a manager.
func (ns nodeState) IsManager() bool {
return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}
// NodeID returns node's ID or empty string if node is inactive.
func (ns nodeState) NodeID() string {
if ns.swarmNode != nil {
return ns.swarmNode.NodeID()
}
return ""
}


@ -0,0 +1,104 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
apitypes "github.com/docker/docker/api/types"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
swarmapi "github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
// GetNodes returns a list of all nodes known to a cluster.
func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
filters, err := newListNodesFilters(options.Filters)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := state.controlClient.ListNodes(
ctx,
&swarmapi.ListNodesRequest{Filters: filters})
if err != nil {
return nil, err
}
nodes := make([]types.Node, 0, len(r.Nodes))
for _, node := range r.Nodes {
nodes = append(nodes, convert.NodeFromGRPC(*node))
}
return nodes, nil
}
// GetNode returns a node based on an ID.
func (c *Cluster) GetNode(input string) (types.Node, error) {
var node *swarmapi.Node
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
n, err := getNode(ctx, state.controlClient, input)
if err != nil {
return err
}
node = n
return nil
}); err != nil {
return types.Node{}, err
}
return convert.NodeFromGRPC(*node), nil
}
// UpdateNode updates existing nodes properties.
func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
nodeSpec, err := convert.NodeSpecToGRPC(spec)
if err != nil {
return errdefs.InvalidParameter(err)
}
ctx, cancel := c.getRequestContext()
defer cancel()
currentNode, err := getNode(ctx, state.controlClient, input)
if err != nil {
return err
}
_, err = state.controlClient.UpdateNode(
ctx,
&swarmapi.UpdateNodeRequest{
NodeID: currentNode.ID,
Spec: &nodeSpec,
NodeVersion: &swarmapi.Version{
Index: version,
},
},
)
return err
})
}
// RemoveNode removes a node from a cluster
func (c *Cluster) RemoveNode(input string, force bool) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
node, err := getNode(ctx, state.controlClient, input)
if err != nil {
return err
}
_, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force})
return err
})
}


@ -0,0 +1,37 @@
package provider // import "github.com/docker/docker/daemon/cluster/provider"
import "github.com/docker/docker/api/types"
// NetworkCreateRequest is a request when creating a network.
type NetworkCreateRequest struct {
ID string
types.NetworkCreateRequest
}
// NetworkCreateResponse is a response when creating a network.
type NetworkCreateResponse struct {
ID string `json:"Id"`
}
// VirtualAddress represents a virtual address.
type VirtualAddress struct {
IPv4 string
IPv6 string
}
// PortConfig represents a port configuration.
type PortConfig struct {
Name string
Protocol int32
TargetPort uint32
PublishedPort uint32
}
// ServiceConfig represents a service configuration.
type ServiceConfig struct {
ID string
Name string
Aliases map[string][]string
VirtualAddresses map[string]*VirtualAddress
ExposedPorts []*PortConfig
}


@ -0,0 +1,117 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
apitypes "github.com/docker/docker/api/types"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
// GetSecret returns a secret from a managed swarm cluster
func (c *Cluster) GetSecret(input string) (types.Secret, error) {
var secret *swarmapi.Secret
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
s, err := getSecret(ctx, state.controlClient, input)
if err != nil {
return err
}
secret = s
return nil
}); err != nil {
return types.Secret{}, err
}
return convert.SecretFromGRPC(secret), nil
}
// GetSecrets returns all secrets of a managed swarm cluster.
func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
filters, err := newListSecretsFilters(options.Filters)
if err != nil {
return nil, err
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := state.controlClient.ListSecrets(ctx,
&swarmapi.ListSecretsRequest{Filters: filters})
if err != nil {
return nil, err
}
secrets := make([]types.Secret, 0, len(r.Secrets))
for _, secret := range r.Secrets {
secrets = append(secrets, convert.SecretFromGRPC(secret))
}
return secrets, nil
}
// CreateSecret creates a new secret in a managed swarm cluster.
func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) {
var resp *swarmapi.CreateSecretResponse
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
secretSpec := convert.SecretSpecToGRPC(s)
r, err := state.controlClient.CreateSecret(ctx,
&swarmapi.CreateSecretRequest{Spec: &secretSpec})
if err != nil {
return err
}
resp = r
return nil
}); err != nil {
return "", err
}
return resp.Secret.ID, nil
}
// RemoveSecret removes a secret from a managed swarm cluster.
func (c *Cluster) RemoveSecret(input string) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
secret, err := getSecret(ctx, state.controlClient, input)
if err != nil {
return err
}
req := &swarmapi.RemoveSecretRequest{
SecretID: secret.ID,
}
_, err = state.controlClient.RemoveSecret(ctx, req)
return err
})
}
// UpdateSecret updates a secret in a managed swarm cluster.
// Note: this is not exposed to the CLI but is available from the API only
func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
secret, err := getSecret(ctx, state.controlClient, input)
if err != nil {
return err
}
secretSpec := convert.SecretSpecToGRPC(spec)
_, err = state.controlClient.UpdateSecret(ctx,
&swarmapi.UpdateSecretRequest{
SecretID: secret.ID,
SecretVersion: &swarmapi.Version{
Index: version,
},
Spec: &secretSpec,
})
return err
})
}


@ -0,0 +1,598 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/docker/distribution/reference"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
types "github.com/docker/docker/api/types/swarm"
timetypes "github.com/docker/docker/api/types/time"
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
runconfigopts "github.com/docker/docker/runconfig/opts"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
// We move the accepted filter check here as "mode" filter
// is processed in the daemon, not in SwarmKit. So it might
	// be good to have the accepted filter check in the same file as
// the filter processing (in the for loop below).
accepted := map[string]bool{
"name": true,
"id": true,
"label": true,
"mode": true,
"runtime": true,
}
if err := options.Filters.Validate(accepted); err != nil {
return nil, err
}
if len(options.Filters.Get("runtime")) == 0 {
// Default to using the container runtime filter
options.Filters.Add("runtime", string(types.RuntimeContainer))
}
filters := &swarmapi.ListServicesRequest_Filters{
NamePrefixes: options.Filters.Get("name"),
IDPrefixes: options.Filters.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
Runtimes: options.Filters.Get("runtime"),
}
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := state.controlClient.ListServices(
ctx,
&swarmapi.ListServicesRequest{Filters: filters})
if err != nil {
return nil, err
}
services := make([]types.Service, 0, len(r.Services))
for _, service := range r.Services {
if options.Filters.Contains("mode") {
var mode string
switch service.Spec.GetMode().(type) {
case *swarmapi.ServiceSpec_Global:
mode = "global"
case *swarmapi.ServiceSpec_Replicated:
mode = "replicated"
}
if !options.Filters.ExactMatch("mode", mode) {
continue
}
}
svcs, err := convert.ServiceFromGRPC(*service)
if err != nil {
return nil, err
}
services = append(services, svcs)
}
return services, nil
}
// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
var service *swarmapi.Service
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
s, err := getService(ctx, state.controlClient, input, insertDefaults)
if err != nil {
return err
}
service = s
return nil
}); err != nil {
return types.Service{}, err
}
svc, err := convert.ServiceFromGRPC(*service)
if err != nil {
return types.Service{}, err
}
return svc, nil
}
// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) {
var resp *apitypes.ServiceCreateResponse
err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
err := c.populateNetworkID(ctx, state.controlClient, &s)
if err != nil {
return err
}
serviceSpec, err := convert.ServiceSpecToGRPC(s)
if err != nil {
return errdefs.InvalidParameter(err)
}
resp = &apitypes.ServiceCreateResponse{}
switch serviceSpec.Task.Runtime.(type) {
// handle other runtimes here
case *swarmapi.TaskSpec_Generic:
switch serviceSpec.Task.GetGeneric().Kind {
case string(types.RuntimePlugin):
info, _ := c.config.Backend.SystemInfo()
if !info.ExperimentalBuild {
return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin)
}
if s.TaskTemplate.PluginSpec == nil {
return errors.New("plugin spec must be set")
}
default:
return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind)
}
r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
if err != nil {
return err
}
resp.ID = r.Service.ID
case *swarmapi.TaskSpec_Container:
ctnr := serviceSpec.Task.GetContainer()
if ctnr == nil {
return errors.New("service does not use container tasks")
}
if encodedAuth != "" {
ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
authReader := strings.NewReader(encodedAuth)
dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
if err := dec.Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
// pin image by digest for API versions < 1.30
// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, this is no longer required.
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
// warning in the client response should be concise
resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
} else if ctnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
ctnr.Image = digestImage
} else {
logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to create a service
// if the registry is slow or unresponsive.
var cancel func()
ctx, cancel = c.getRequestContext()
defer cancel()
}
r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
if err != nil {
return err
}
resp.ID = r.Service.ID
}
return nil
})
return resp, err
}
// UpdateService updates existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) {
var resp *apitypes.ServiceUpdateResponse
err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
err := c.populateNetworkID(ctx, state.controlClient, &spec)
if err != nil {
return err
}
serviceSpec, err := convert.ServiceSpecToGRPC(spec)
if err != nil {
return errdefs.InvalidParameter(err)
}
currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
if err != nil {
return err
}
resp = &apitypes.ServiceUpdateResponse{}
switch serviceSpec.Task.Runtime.(type) {
case *swarmapi.TaskSpec_Generic:
switch serviceSpec.Task.GetGeneric().Kind {
case string(types.RuntimePlugin):
if spec.TaskTemplate.PluginSpec == nil {
return errors.New("plugin spec must be set")
}
}
case *swarmapi.TaskSpec_Container:
newCtnr := serviceSpec.Task.GetContainer()
if newCtnr == nil {
return errors.New("service does not use container tasks")
}
encodedAuth := flags.EncodedRegistryAuth
if encodedAuth != "" {
newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
} else {
// this is needed because if the encodedAuth isn't being updated then we
				// shouldn't lose it; we continue to use the one that was already present
var ctnr *swarmapi.ContainerSpec
switch flags.RegistryAuthFrom {
case apitypes.RegistryAuthFromSpec, "":
ctnr = currentService.Spec.Task.GetContainer()
case apitypes.RegistryAuthFromPreviousSpec:
if currentService.PreviousSpec == nil {
return errors.New("service does not have a previous spec")
}
ctnr = currentService.PreviousSpec.Task.GetContainer()
default:
return errors.New("unsupported registryAuthFrom value")
}
if ctnr == nil {
return errors.New("service does not use container tasks")
}
newCtnr.PullOptions = ctnr.PullOptions
// update encodedAuth so it can be used to pin image by digest
if ctnr.PullOptions != nil {
encodedAuth = ctnr.PullOptions.RegistryAuth
}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
// pin image by digest for API versions < 1.30
// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
			// should be removed in the future. Since integration tests only use the
			// latest API version, this is no longer required.
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
// warning in the client response should be concise
resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
} else if newCtnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
newCtnr.Image = digestImage
} else {
logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to update a service
// if the registry is slow or unresponsive.
var cancel func()
ctx, cancel = c.getRequestContext()
defer cancel()
}
}
var rollback swarmapi.UpdateServiceRequest_Rollback
switch flags.Rollback {
case "", "none":
rollback = swarmapi.UpdateServiceRequest_NONE
case "previous":
rollback = swarmapi.UpdateServiceRequest_PREVIOUS
default:
return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
}
_, err = state.controlClient.UpdateService(
ctx,
&swarmapi.UpdateServiceRequest{
ServiceID: currentService.ID,
Spec: &serviceSpec,
ServiceVersion: &swarmapi.Version{
Index: version,
},
Rollback: rollback,
},
)
return err
})
return resp, err
}
// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
service, err := getService(ctx, state.controlClient, input, false)
if err != nil {
return err
}
_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
return err
})
}
// ServiceLogs collects service logs and returns them as a channel of log messages for the caller to consume
func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
if !state.IsActiveManager() {
return nil, c.errNoManager(state)
}
swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
if err != nil {
return nil, errors.Wrap(err, "error making log selector")
}
// set the streams we'll use
stdStreams := []swarmapi.LogStream{}
if config.ShowStdout {
stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
}
if config.ShowStderr {
stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
}
// Get tail value squared away - the number of previous log lines we look at
var tail int64
// in ContainerLogs, if the tail value is ANYTHING non-integer, we just set
	// it to -1 (all). I don't agree with that, but I also think no tail value
	// should be legitimate. If you don't pass tail, we assume you want "all".
if config.Tail == "all" || config.Tail == "" {
// tail of 0 means send all logs on the swarmkit side
tail = 0
} else {
t, err := strconv.Atoi(config.Tail)
if err != nil {
return nil, errors.New("tail value must be a positive integer or \"all\"")
}
if t < 0 {
return nil, errors.New("negative tail values not supported")
}
// we actually use negative tail in swarmkit to represent messages
// backwards starting from the beginning. also, -1 means no logs. so,
// basically, for api compat with docker container logs, add one and
		// flip the sign. We error above if you try a negative tail, which
// isn't supported by docker (and would error deeper in the stack
// anyway)
//
// See the logs protobuf for more information
tail = int64(-(t + 1))
}
// get the since value - the time in the past we're looking at logs starting from
var sinceProto *gogotypes.Timestamp
if config.Since != "" {
s, n, err := timetypes.ParseTimestamps(config.Since, 0)
if err != nil {
return nil, errors.Wrap(err, "could not parse since timestamp")
}
since := time.Unix(s, n)
sinceProto, err = gogotypes.TimestampProto(since)
if err != nil {
return nil, errors.Wrap(err, "could not parse timestamp to proto")
}
}
stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
Selector: swarmSelector,
Options: &swarmapi.LogSubscriptionOptions{
Follow: config.Follow,
Streams: stdStreams,
Tail: tail,
Since: sinceProto,
},
})
if err != nil {
return nil, err
}
messageChan := make(chan *backend.LogMessage, 1)
go func() {
defer close(messageChan)
for {
// Check the context before doing anything.
select {
case <-ctx.Done():
return
default:
}
subscribeMsg, err := stream.Recv()
if err == io.EOF {
return
}
// if we're not io.EOF, push the message in and return
if err != nil {
select {
case <-ctx.Done():
case messageChan <- &backend.LogMessage{Err: err}:
}
return
}
for _, msg := range subscribeMsg.Messages {
// make a new message
m := new(backend.LogMessage)
m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3)
// add the timestamp, adding the error if it fails
m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
if err != nil {
m.Err = err
}
nodeKey := contextPrefix + ".node.id"
serviceKey := contextPrefix + ".service.id"
taskKey := contextPrefix + ".task.id"
// copy over all of the details
for _, d := range msg.Attrs {
switch d.Key {
case nodeKey, serviceKey, taskKey:
// we have the final say over context details (in case there
// is a conflict (if the user added a detail with a context's
// key for some reason))
default:
m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value})
}
}
m.Attrs = append(m.Attrs,
backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID},
backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID},
backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID},
)
switch msg.Stream {
case swarmapi.LogStreamStdout:
m.Source = "stdout"
case swarmapi.LogStreamStderr:
m.Source = "stderr"
}
m.Line = msg.Data
// there could be a case where the reader stops accepting
// messages and the context is canceled. we need to check that
// here, or otherwise we risk blocking forever on the message
// send.
select {
case <-ctx.Done():
return
case messageChan <- m:
}
}
}
}()
return messageChan, nil
}
// convertSelector takes a backend.LogSelector, which contains raw names that
// may or may not be valid, and converts them to an api.LogSelector proto. It
// returns an error if something fails
func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
// don't rely on swarmkit to resolve IDs, do it ourselves
swarmSelector := &swarmapi.LogSelector{}
for _, s := range selector.Services {
service, err := getService(ctx, cc, s, false)
if err != nil {
return nil, err
}
c := service.Spec.Task.GetContainer()
if c == nil {
return nil, errors.New("logs only supported on container tasks")
}
swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
}
for _, t := range selector.Tasks {
task, err := getTask(ctx, cc, t)
if err != nil {
return nil, err
}
c := task.Spec.GetContainer()
if c == nil {
return nil, errors.New("logs only supported on container tasks")
}
swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
}
return swarmSelector, nil
}
// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234
func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
ref, err := reference.ParseAnyReference(image)
if err != nil {
return "", err
}
namedRef, ok := ref.(reference.Named)
if !ok {
if _, ok := ref.(reference.Digested); ok {
return image, nil
}
return "", errors.Errorf("unknown image reference format: %s", image)
}
// only query registry if not a canonical reference (i.e. with digest)
if _, ok := namedRef.(reference.Canonical); !ok {
namedRef = reference.TagNameOnly(namedRef)
taggedRef, ok := namedRef.(reference.NamedTagged)
if !ok {
return "", errors.Errorf("image reference not tagged: %s", image)
}
repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
if err != nil {
return "", err
}
dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
if err != nil {
return "", err
}
namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
if err != nil {
return "", err
}
// return familiar form until interface updated to return type
return reference.FamiliarString(namedDigestedRef), nil
}
// reference already contains a digest, so just return it
return reference.FamiliarString(ref), nil
}
// digestWarning constructs a formatted warning string
// using the image name that could not be pinned by digest. The
// formatting is hardcoded, but could be made smarter in the future
func digestWarning(image string) string {
return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}


@ -0,0 +1,568 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"fmt"
"net"
"strings"
"time"
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/signal"
swarmapi "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/encryption"
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// Init initializes new cluster from user provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
c.controlMutex.Lock()
defer c.controlMutex.Unlock()
if c.nr != nil {
if req.ForceNewCluster {
// Take c.mu temporarily to wait for presently running
// API handlers to finish before shutting down the node.
c.mu.Lock()
			if !c.nr.nodeState.IsManager() {
				c.mu.Unlock()
				return "", errSwarmNotManager
			}
c.mu.Unlock()
if err := c.nr.Stop(); err != nil {
return "", err
}
} else {
return "", errSwarmExists
}
}
if err := validateAndSanitizeInitRequest(&req); err != nil {
return "", errdefs.InvalidParameter(err)
}
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
return "", err
}
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
if err != nil {
return "", err
}
dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr)
if err != nil {
return "", err
}
localAddr := listenHost
// If the local address is undetermined, the advertise address
// will be used as local address, if it belongs to this system.
// If the advertise address is not local, then we try to find
// a system address to use as local address. If this fails,
// we give up and ask the user to pass the listen address.
if net.ParseIP(localAddr).IsUnspecified() {
advertiseIP := net.ParseIP(advertiseHost)
found := false
for _, systemIP := range listSystemIPs() {
if systemIP.Equal(advertiseIP) {
localAddr = advertiseIP.String()
found = true
break
}
}
if !found {
ip, err := c.resolveSystemAddr()
if err != nil {
logrus.Warnf("Could not find a local address: %v", err)
return "", errMustSpecifyListenAddr
}
localAddr = ip.String()
}
}
nr, err := c.newNodeRunner(nodeStartConfig{
forceNewCluster: req.ForceNewCluster,
autolock: req.AutoLockManagers,
LocalAddr: localAddr,
ListenAddr: net.JoinHostPort(listenHost, listenPort),
AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort),
DataPathAddr: dataPathAddr,
availability: req.Availability,
})
if err != nil {
return "", err
}
c.mu.Lock()
c.nr = nr
c.mu.Unlock()
if err := <-nr.Ready(); err != nil {
c.mu.Lock()
c.nr = nil
c.mu.Unlock()
if !req.ForceNewCluster { // if failure on first attempt don't keep state
if err := clearPersistentState(c.root); err != nil {
return "", err
}
}
return "", err
}
state := nr.State()
if state.swarmNode == nil { // should never happen but protect from panic
return "", errors.New("invalid cluster state for spec initialization")
}
if err := initClusterSpec(state.swarmNode, req.Spec); err != nil {
return "", err
}
return state.NodeID(), nil
}
// Join makes current Cluster part of an existing swarm cluster.
func (c *Cluster) Join(req types.JoinRequest) error {
c.controlMutex.Lock()
defer c.controlMutex.Unlock()
c.mu.Lock()
if c.nr != nil {
c.mu.Unlock()
return errors.WithStack(errSwarmExists)
}
c.mu.Unlock()
if err := validateAndSanitizeJoinRequest(&req); err != nil {
return errdefs.InvalidParameter(err)
}
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
return err
}
var advertiseAddr string
if req.AdvertiseAddr != "" {
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
}
}
dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr)
if err != nil {
return err
}
nr, err := c.newNodeRunner(nodeStartConfig{
RemoteAddr: req.RemoteAddrs[0],
ListenAddr: net.JoinHostPort(listenHost, listenPort),
AdvertiseAddr: advertiseAddr,
DataPathAddr: dataPathAddr,
joinAddr: req.RemoteAddrs[0],
joinToken: req.JoinToken,
availability: req.Availability,
})
if err != nil {
return err
}
c.mu.Lock()
c.nr = nr
c.mu.Unlock()
select {
case <-time.After(swarmConnectTimeout):
return errSwarmJoinTimeoutReached
case err := <-nr.Ready():
if err != nil {
c.mu.Lock()
c.nr = nil
c.mu.Unlock()
if err := clearPersistentState(c.root); err != nil {
return err
}
}
return err
}
}
// Inspect retrieves the configuration properties of a managed swarm cluster.
func (c *Cluster) Inspect() (types.Swarm, error) {
var swarm types.Swarm
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
s, err := c.inspect(ctx, state)
if err != nil {
return err
}
swarm = s
return nil
}); err != nil {
return types.Swarm{}, err
}
return swarm, nil
}
func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) {
s, err := getSwarm(ctx, state.controlClient)
if err != nil {
return types.Swarm{}, err
}
return convert.SwarmFromGRPC(*s), nil
}
// Update updates configuration of a managed swarm cluster.
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
swarm, err := getSwarm(ctx, state.controlClient)
if err != nil {
return err
}
// Validate spec name.
if spec.Annotations.Name == "" {
spec.Annotations.Name = "default"
} else if spec.Annotations.Name != "default" {
return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`))
}
		// In update, the client should provide the complete spec of the swarm, including
		// Name and Labels. If a field is specified with 0 or nil, then the default value
		// will be passed on to swarmkit.
clusterSpec, err := convert.SwarmSpecToGRPC(spec)
if err != nil {
return errdefs.InvalidParameter(err)
}
_, err = state.controlClient.UpdateCluster(
ctx,
&swarmapi.UpdateClusterRequest{
ClusterID: swarm.ID,
Spec: &clusterSpec,
ClusterVersion: &swarmapi.Version{
Index: version,
},
Rotation: swarmapi.KeyRotation{
WorkerJoinToken: flags.RotateWorkerToken,
ManagerJoinToken: flags.RotateManagerToken,
ManagerUnlockKey: flags.RotateManagerUnlockKey,
},
},
)
return err
})
}
// GetUnlockKey returns the unlock key for the swarm.
func (c *Cluster) GetUnlockKey() (string, error) {
var resp *swarmapi.GetUnlockKeyResponse
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
client := swarmapi.NewCAClient(state.grpcConn)
r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
if err != nil {
return err
}
resp = r
return nil
}); err != nil {
return "", err
}
if len(resp.UnlockKey) == 0 {
// no key
return "", nil
}
return encryption.HumanReadableKey(resp.UnlockKey), nil
}
// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
c.controlMutex.Lock()
defer c.controlMutex.Unlock()
c.mu.RLock()
state := c.currentNodeState()
if !state.IsActiveManager() {
		// when the manager is not active, return the error
		// unless the swarm is locked.
if err := c.errNoManager(state); err != errSwarmLocked {
c.mu.RUnlock()
return err
}
} else {
// when manager is active, return an error of "not locked"
c.mu.RUnlock()
return notLockedError{}
}
// only when swarm is locked, code running reaches here
nr := c.nr
c.mu.RUnlock()
key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
if err != nil {
return errdefs.InvalidParameter(err)
}
config := nr.config
config.lockKey = key
if err := nr.Stop(); err != nil {
return err
}
nr, err = c.newNodeRunner(config)
if err != nil {
return err
}
c.mu.Lock()
c.nr = nr
c.mu.Unlock()
if err := <-nr.Ready(); err != nil {
if errors.Cause(err) == errSwarmLocked {
return invalidUnlockKey{}
}
return errors.Errorf("swarm component could not be started: %v", err)
}
return nil
}
// Leave shuts down Cluster and removes current state.
func (c *Cluster) Leave(force bool) error {
c.controlMutex.Lock()
defer c.controlMutex.Unlock()
c.mu.Lock()
nr := c.nr
if nr == nil {
c.mu.Unlock()
return errors.WithStack(errNoSwarm)
}
state := c.currentNodeState()
c.mu.Unlock()
if errors.Cause(state.err) == errSwarmLocked && !force {
// leave a locked swarm without --force is not allowed
return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message."))
}
if state.IsManager() && !force {
msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
if state.IsActiveManager() {
active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
if err == nil {
if active && removingManagerCausesLossOfQuorum(reachable, unreachable) {
if isLastManager(reachable, unreachable) {
msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
return errors.WithStack(notAvailableError(msg))
}
msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable)
}
}
} else {
msg += "Doing so may lose the consensus of your cluster. "
}
msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
return errors.WithStack(notAvailableError(msg))
}
// release readers in here
if err := nr.Stop(); err != nil {
logrus.Errorf("failed to shut down cluster node: %v", err)
signal.DumpStacks("")
return err
}
c.mu.Lock()
c.nr = nil
c.mu.Unlock()
if nodeID := state.NodeID(); nodeID != "" {
nodeContainers, err := c.listContainerForNode(nodeID)
if err != nil {
return err
}
for _, id := range nodeContainers {
if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
logrus.Errorf("error removing %v: %v", id, err)
}
}
}
// todo: cleanup optional?
if err := clearPersistentState(c.root); err != nil {
return err
}
c.config.Backend.DaemonLeavesCluster()
return nil
}
// Info returns information about the current cluster state.
func (c *Cluster) Info() types.Info {
info := types.Info{
NodeAddr: c.GetAdvertiseAddress(),
}
c.mu.RLock()
defer c.mu.RUnlock()
state := c.currentNodeState()
info.LocalNodeState = state.status
if state.err != nil {
info.Error = state.err.Error()
}
ctx, cancel := c.getRequestContext()
defer cancel()
if state.IsActiveManager() {
info.ControlAvailable = true
swarm, err := c.inspect(ctx, state)
if err != nil {
info.Error = err.Error()
}
info.Cluster = &swarm.ClusterInfo
if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil {
info.Error = err.Error()
} else {
info.Nodes = len(r.Nodes)
for _, n := range r.Nodes {
if n.ManagerStatus != nil {
info.Managers = info.Managers + 1
}
}
}
}
if state.swarmNode != nil {
for _, r := range state.swarmNode.Remotes() {
info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
}
info.NodeID = state.swarmNode.NodeID()
}
return info
}
func validateAndSanitizeInitRequest(req *types.InitRequest) error {
var err error
req.ListenAddr, err = validateAddr(req.ListenAddr)
if err != nil {
return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
}
if req.Spec.Annotations.Name == "" {
req.Spec.Annotations.Name = "default"
} else if req.Spec.Annotations.Name != "default" {
return errors.New(`swarm spec must be named "default"`)
}
return nil
}
func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
var err error
req.ListenAddr, err = validateAddr(req.ListenAddr)
if err != nil {
return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
}
if len(req.RemoteAddrs) == 0 {
return errors.New("at least 1 RemoteAddr is required to join")
}
for i := range req.RemoteAddrs {
req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i])
if err != nil {
return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
}
}
return nil
}
func validateAddr(addr string) (string, error) {
if addr == "" {
return addr, errors.New("invalid empty address")
}
newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
if err != nil {
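		// The address could not be parsed as a TCP address; fall back to
		// returning the raw input unchanged.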
return addr, nil
}
return strings.TrimPrefix(newaddr, "tcp://"), nil
}
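// initClusterSpec waits (up to 5 seconds) for the manager control socket,
// polls until the newly created cluster appears, and then merges the
// user-provided spec over swarmkit's defaults before updating the cluster.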
func initClusterSpec(node *swarmnode.Node, spec types.Spec) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
for conn := range node.ListenControlSocket(ctx) {
if ctx.Err() != nil {
return ctx.Err()
}
if conn != nil {
client := swarmapi.NewControlClient(conn)
var cluster *swarmapi.Cluster
for i := 0; ; i++ {
lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
if err != nil {
return fmt.Errorf("error on listing clusters: %v", err)
}
if len(lcr.Clusters) == 0 {
if i < 10 {
time.Sleep(200 * time.Millisecond)
continue
}
return errors.New("empty list of clusters was returned")
}
cluster = lcr.Clusters[0]
break
}
			// In init, we take the initial default values from swarmkit and merge
			// any non-nil or non-zero value from spec into the GRPC spec. This
			// leaves the default values alone.
			// Note that this is different from Update(), where we expect the user
			// to specify the complete spec of the cluster (as they already know
			// the existing one and know which fields to update).
clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)
}
_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
ClusterID: cluster.ID,
ClusterVersion: &cluster.Meta.Version,
Spec: &clusterSpec,
})
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)
}
return nil
}
}
return ctx.Err()
}
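// listContainerForNode returns the IDs of all containers labeled with the
// given swarm node ID.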
func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) {
var ids []string
filters := filters.NewArgs()
filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID))
containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{
Filters: filters,
})
if err != nil {
return []string{}, err
}
for _, c := range containers {
ids = append(ids, c.ID)
}
return ids, nil
}


@ -0,0 +1,86 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
"golang.org/x/net/context"
)
// GetTasks returns a list of tasks matching the filter options.
func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
var r *swarmapi.ListTasksResponse
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
filterTransform := func(filter filters.Args) error {
if filter.Contains("service") {
serviceFilters := filter.Get("service")
for _, serviceFilter := range serviceFilters {
service, err := getService(ctx, state.controlClient, serviceFilter, false)
if err != nil {
return err
}
filter.Del("service", serviceFilter)
filter.Add("service", service.ID)
}
}
if filter.Contains("node") {
nodeFilters := filter.Get("node")
for _, nodeFilter := range nodeFilters {
node, err := getNode(ctx, state.controlClient, nodeFilter)
if err != nil {
return err
}
filter.Del("node", nodeFilter)
filter.Add("node", node.ID)
}
}
if !filter.Contains("runtime") {
// default to only showing container tasks
filter.Add("runtime", "container")
filter.Add("runtime", "")
}
return nil
}
filters, err := newListTasksFilters(options.Filters, filterTransform)
if err != nil {
return err
}
r, err = state.controlClient.ListTasks(
ctx,
&swarmapi.ListTasksRequest{Filters: filters})
return err
}); err != nil {
return nil, err
}
tasks := make([]types.Task, 0, len(r.Tasks))
for _, task := range r.Tasks {
t, err := convert.TaskFromGRPC(*task)
if err != nil {
return nil, err
}
tasks = append(tasks, t)
}
return tasks, nil
}
// GetTask returns a task by an ID.
func (c *Cluster) GetTask(input string) (types.Task, error) {
var task *swarmapi.Task
if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
t, err := getTask(ctx, state.controlClient, input)
if err != nil {
return err
}
task = t
return nil
}); err != nil {
return types.Task{}, err
}
return convert.TaskFromGRPC(*task)
}


@ -0,0 +1,63 @@
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/ioutils"
)
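// loadPersistentState reads the saved node start configuration from the swarm
// state file under root. A missing swarm node certificate means there is no
// actual state to restore, in which case the persisted state is cleared.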
func loadPersistentState(root string) (*nodeStartConfig, error) {
dt, err := ioutil.ReadFile(filepath.Join(root, stateFile))
if err != nil {
return nil, err
}
// missing certificate means no actual state to restore from
if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil {
if os.IsNotExist(err) {
clearPersistentState(root)
}
return nil, err
}
var st nodeStartConfig
if err := json.Unmarshal(dt, &st); err != nil {
return nil, err
}
return &st, nil
}
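// savePersistentState atomically writes the given node start configuration to
// the swarm state file under root.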
func savePersistentState(root string, config nodeStartConfig) error {
dt, err := json.Marshal(config)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600)
}
func clearPersistentState(root string) error {
// todo: backup this data instead of removing?
// rather than delete the entire swarm directory, delete the contents in order to preserve the inode
// (for example, allowing it to be bind-mounted)
files, err := ioutil.ReadDir(root)
if err != nil {
return err
}
for _, f := range files {
if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil {
return err
}
}
return nil
}
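// removingManagerCausesLossOfQuorum reports whether removing one reachable
// manager would leave the remaining managers without a Raft quorum.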
func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
return reachable-2 <= unreachable
}
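// isLastManager reports whether the node being removed is the only manager
// left in the cluster.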
func isLastManager(reachable, unreachable int) bool {
return reachable == 1 && unreachable == 0
}


@ -0,0 +1,186 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"runtime"
"strings"
"time"
"github.com/docker/docker/api/types/backend"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
)
// merge merges two Configs, the image container configuration (default values),
// and the user container configuration, either passed by the API or generated
// by the cli.
// It will mutate the specified user configuration (userConf) with the image
// configuration where the user configuration is incomplete.
func merge(userConf, imageConf *containertypes.Config) error {
if userConf.User == "" {
userConf.User = imageConf.User
}
if len(userConf.ExposedPorts) == 0 {
userConf.ExposedPorts = imageConf.ExposedPorts
} else if imageConf.ExposedPorts != nil {
for port := range imageConf.ExposedPorts {
if _, exists := userConf.ExposedPorts[port]; !exists {
userConf.ExposedPorts[port] = struct{}{}
}
}
}
if len(userConf.Env) == 0 {
userConf.Env = imageConf.Env
} else {
for _, imageEnv := range imageConf.Env {
found := false
imageEnvKey := strings.Split(imageEnv, "=")[0]
for _, userEnv := range userConf.Env {
userEnvKey := strings.Split(userEnv, "=")[0]
if runtime.GOOS == "windows" {
// Case insensitive environment variables on Windows
imageEnvKey = strings.ToUpper(imageEnvKey)
userEnvKey = strings.ToUpper(userEnvKey)
}
if imageEnvKey == userEnvKey {
found = true
break
}
}
if !found {
userConf.Env = append(userConf.Env, imageEnv)
}
}
}
if userConf.Labels == nil {
userConf.Labels = map[string]string{}
}
for l, v := range imageConf.Labels {
if _, ok := userConf.Labels[l]; !ok {
userConf.Labels[l] = v
}
}
if len(userConf.Entrypoint) == 0 {
if len(userConf.Cmd) == 0 {
userConf.Cmd = imageConf.Cmd
userConf.ArgsEscaped = imageConf.ArgsEscaped
}
if userConf.Entrypoint == nil {
userConf.Entrypoint = imageConf.Entrypoint
}
}
if imageConf.Healthcheck != nil {
if userConf.Healthcheck == nil {
userConf.Healthcheck = imageConf.Healthcheck
} else {
if len(userConf.Healthcheck.Test) == 0 {
userConf.Healthcheck.Test = imageConf.Healthcheck.Test
}
if userConf.Healthcheck.Interval == 0 {
userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval
}
if userConf.Healthcheck.Timeout == 0 {
userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout
}
if userConf.Healthcheck.StartPeriod == 0 {
userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod
}
if userConf.Healthcheck.Retries == 0 {
userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries
}
}
}
if userConf.WorkingDir == "" {
userConf.WorkingDir = imageConf.WorkingDir
}
if len(userConf.Volumes) == 0 {
userConf.Volumes = imageConf.Volumes
} else {
for k, v := range imageConf.Volumes {
userConf.Volumes[k] = v
}
}
if userConf.StopSignal == "" {
userConf.StopSignal = imageConf.StopSignal
}
return nil
}
// CreateImageFromContainer creates a new image from a container. The container
// config will be updated by applying the change set to the custom config, then
// applying that config over the existing container config.
func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateImageConfig) (string, error) {
start := time.Now()
container, err := daemon.GetContainer(name)
if err != nil {
return "", err
}
// It is not possible to commit a running container on Windows
if (runtime.GOOS == "windows") && container.IsRunning() {
return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS)
}
if container.IsDead() {
err := fmt.Errorf("You cannot commit container %s which is Dead", container.ID)
return "", errdefs.Conflict(err)
}
if container.IsRemovalInProgress() {
err := fmt.Errorf("You cannot commit container %s which is being removed", container.ID)
return "", errdefs.Conflict(err)
}
if c.Pause && !container.IsPaused() {
daemon.containerPause(container)
defer daemon.containerUnpause(container)
}
if c.Config == nil {
c.Config = container.Config
}
newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes, container.OS)
if err != nil {
return "", err
}
if err := merge(newConfig, container.Config); err != nil {
return "", err
}
id, err := daemon.imageService.CommitImage(backend.CommitConfig{
Author: c.Author,
Comment: c.Comment,
Config: newConfig,
ContainerConfig: container.Config,
ContainerID: container.ID,
ContainerMountLabel: container.MountLabel,
ContainerOS: container.OS,
ParentImageID: string(container.ImageID),
})
if err != nil {
return "", err
}
var imageRef string
if c.Repo != "" {
imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag)
if err != nil {
return "", err
}
}
daemon.LogContainerEventWithAttributes(container, "commit", map[string]string{
"comment": c.Comment,
"imageID": id.String(),
"imageRef": imageRef,
})
containerActions.WithValues("commit").UpdateSince(start)
return id.String(), nil
}


@ -0,0 +1,530 @@
package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"runtime"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
	// DefaultMaxConcurrentDownloads is the default value for the
	// maximum number of downloads that may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
	// DefaultMaxConcurrentUploads is the default value for the
	// maximum number of uploads that may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
)
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// from others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
	// to stop when the daemon is being shut down
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS bool `json:"tls,omitempty"`
TLSVerify bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
MetricsAddress string `json:"metrics-addr"`
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// NetworkControlPlaneMTU allows specifying the control-plane MTU; this can help optimize network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
}
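A minimal sketch of how a daemon.json fragment maps onto the fields above through their json tags; the helper name is made up and it relies only on the encoding/json package already used in this file.

// exampleDecodeCommonConfig is an illustrative sketch; the function name is made up.
func exampleDecodeCommonConfig() {
    data := []byte(`{"data-root":"/var/lib/docker","max-concurrent-downloads":3,"live-restore":true,"shutdown-timeout":30}`)
    var cc CommonConfig
    if err := json.Unmarshal(data, &cc); err != nil {
        panic(err)
    }
    // cc.Root == "/var/lib/docker", *cc.MaxConcurrentDownloads == 3,
    // cc.LiveRestoreEnabled == true, cc.ShutdownTimeout == 30
}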
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
if runtime.GOOS != "linux" {
config.V2Only = true
}
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", fmt.Errorf("discovery advertise parsing failed (%v)", err)
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In swarm, duplicate labels are removed, so exact duplicates are collapsed
// to a single entry here, while two different values for the same key are
// treated as a conflict and rejected with an error.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
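A minimal usage sketch (the helper name is made up): exact duplicates collapse to a single entry, while two different values for the same key are rejected.

// exampleConflictFreeLabels is an illustrative sketch; the function name is made up.
func exampleConflictFreeLabels() {
    labels, _ := GetConflictFreeLabels([]string{"foo=bar", "foo=bar", "one=two"})
    // labels holds "foo=bar" once plus "one=two"; map iteration order is not fixed
    _ = labels
    if _, err := GetConflictFreeLabels([]string{"foo=bar", "foo=baz"}); err != nil {
        // err: conflict labels for foo=baz and foo=bar
    }
}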
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return fmt.Errorf("unable to configure the Docker daemon with file %s: %v", configFile, err)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return fmt.Errorf("file configuration validation failed (%v)", err)
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, fmt.Errorf("configuration validation from file failed (%v)", err)
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err)
}
return fileConfig, nil
}
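A rough sketch of the precedence this establishes, assuming flags is the daemon's full flag set and /etc/docker/daemon.json contains {"max-concurrent-downloads": 3}: file values are loaded first, flag values are merged on top, and a key explicitly set in both places is rejected as a conflict before the merge. The helper name is made up.

// exampleMergeDaemonConfigurations is an illustrative sketch; it assumes flags
// is the daemon's complete flag set and the file content described above.
func exampleMergeDaemonConfigurations(flags *pflag.FlagSet) {
    flagsConfig := &Config{CommonConfig: CommonConfig{Debug: true}} // values parsed from the command line
    merged, err := MergeDaemonConfigurations(flagsConfig, flags, "/etc/docker/daemon.json")
    if err != nil {
        return // conflict, unknown key, or validation failure
    }
    // merged.Debug == true (from flags), *merged.MaxConcurrentDownloads == 3 (from file)
    _ = merged
}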
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// Also apply the file value to named options whose flags are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
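A small sketch of the flattening this performs; the "some-group" key is made up and assumed not to be listed in flatOptions. Nested option maps are lifted to the top level so their keys can be compared against flag names, while keys listed in flatOptions keep their nested map.

// exampleConfigValuesSet is an illustrative sketch; "some-group" is a made-up key.
func exampleConfigValuesSet() {
    in := map[string]interface{}{
        "debug":      true,
        "some-group": map[string]interface{}{"a": 1},
    }
    out := configValuesSet(in)
    // out == map[string]interface{}{"debug": true, "a": 1}
    _ = out
}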
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if _, valid := unknownKeys[namedOption.Name()]; valid {
delete(unknownKeys, namedOption.Name())
}
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads and config.MaxConcurrentUploads.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
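A minimal sketch of the checks above (the helper name is made up); a negative concurrency limit fails, while a config with only well-formed labels and DNS entries is expected to pass.

// exampleValidate is an illustrative sketch; the function name is made up.
func exampleValidate() {
    n := -1
    bad := &Config{CommonConfig: CommonConfig{MaxConcurrentDownloads: &n}}
    _ = Validate(bad) // error: invalid max concurrent downloads: -1

    good := &Config{CommonConfig: CommonConfig{Labels: []string{"env=prod"}, DNS: []string{"1.1.1.1"}}}
    _ = Validate(good) // nil with an otherwise default config
}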
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}

View file

@@ -0,0 +1,71 @@
// +build linux freebsd

package config // import "github.com/docker/docker/daemon/config"
import (
"net"
"github.com/docker/docker/api/types"
)
// CommonUnixConfig defines configuration of a docker daemon that is
// common across Unix platforms.
type CommonUnixConfig struct {
Runtimes map[string]types.Runtime `json:"runtimes,omitempty"`
DefaultRuntime string `json:"default-runtime,omitempty"`
DefaultInitBinary string `json:"default-init,omitempty"`
}
type commonUnixBridgeConfig struct {
DefaultIP net.IP `json:"ip,omitempty"`
IP string `json:"bip,omitempty"`
DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"`
DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"`
InterContainerCommunication bool `json:"icc,omitempty"`
}
// GetRuntime returns the runtime path and arguments for a given
// runtime name
func (conf *Config) GetRuntime(name string) *types.Runtime {
conf.Lock()
defer conf.Unlock()
if rt, ok := conf.Runtimes[name]; ok {
return &rt
}
return nil
}
// GetDefaultRuntimeName returns the current default runtime
func (conf *Config) GetDefaultRuntimeName() string {
conf.Lock()
rt := conf.DefaultRuntime
conf.Unlock()
return rt
}
// GetAllRuntimes returns a copy of the runtimes map
func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
conf.Lock()
rts := conf.Runtimes
conf.Unlock()
return rts
}
// GetExecRoot returns the user configured Exec-root
func (conf *Config) GetExecRoot() string {
return conf.ExecRoot
}
// GetInitPath returns the configured docker-init path
func (conf *Config) GetInitPath() string {
conf.Lock()
defer conf.Unlock()
if conf.InitPath != "" {
return conf.InitPath
}
if conf.DefaultInitBinary != "" {
return conf.DefaultInitBinary
}
return DefaultInitBinary
}

View file

@@ -0,0 +1,84 @@
// +build !windows

package config // import "github.com/docker/docker/daemon/config"
import (
"testing"
"github.com/docker/docker/api/types"
)
func TestCommonUnixValidateConfigurationErrors(t *testing.T) {
testCases := []struct {
config *Config
}{
// Can't override the stock runtime
{
config: &Config{
CommonUnixConfig: CommonUnixConfig{
Runtimes: map[string]types.Runtime{
StockRuntimeName: {},
},
},
},
},
// Default runtime should be present in runtimes
{
config: &Config{
CommonUnixConfig: CommonUnixConfig{
Runtimes: map[string]types.Runtime{
"foo": {},
},
DefaultRuntime: "bar",
},
},
},
}
for _, tc := range testCases {
err := Validate(tc.config)
if err == nil {
t.Fatalf("expected error, got nil for config %v", tc.config)
}
}
}
func TestCommonUnixGetInitPath(t *testing.T) {
testCases := []struct {
config *Config
expectedInitPath string
}{
{
config: &Config{
InitPath: "some-init-path",
},
expectedInitPath: "some-init-path",
},
{
config: &Config{
CommonUnixConfig: CommonUnixConfig{
DefaultInitBinary: "foo-init-bin",
},
},
expectedInitPath: "foo-init-bin",
},
{
config: &Config{
InitPath: "init-path-A",
CommonUnixConfig: CommonUnixConfig{
DefaultInitBinary: "init-path-B",
},
},
expectedInitPath: "init-path-A",
},
{
config: &Config{},
expectedInitPath: "docker-init",
},
}
for _, tc := range testCases {
initPath := tc.config.GetInitPath()
if initPath != tc.expectedInitPath {
t.Fatalf("expected initPath to be %v, got %v", tc.expectedInitPath, initPath)
}
}
}

View file

@@ -0,0 +1,489 @@
package config // import "github.com/docker/docker/daemon/config"
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/internal/testutil"
"github.com/docker/docker/opts"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
"github.com/gotestyourself/gotestyourself/fs"
"github.com/spf13/pflag"
)
func TestDaemonConfigurationNotFound(t *testing.T) {
_, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker")
if err == nil || !os.IsNotExist(err) {
t.Fatalf("expected does not exist error, got %v", err)
}
}
func TestDaemonBrokenConfiguration(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{"Debug": tru`))
f.Close()
_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
if err == nil {
t.Fatalf("expected error, got %v", err)
}
}
func TestParseClusterAdvertiseSettings(t *testing.T) {
_, err := ParseClusterAdvertiseSettings("something", "")
if err != discovery.ErrDiscoveryDisabled {
t.Fatalf("expected discovery disabled error, got %v\n", err)
}
_, err = ParseClusterAdvertiseSettings("", "something")
if err == nil {
t.Fatalf("expected discovery store error, got %v\n", err)
}
_, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080")
if err != nil {
t.Fatal(err)
}
}
func TestFindConfigurationConflicts(t *testing.T) {
config := map[string]interface{}{"authorization-plugins": "foobar"}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("authorization-plugins", "", "")
assert.Check(t, flags.Set("authorization-plugins", "asdf"))
testutil.ErrorContains(t,
findConfigurationConflicts(config, flags),
"authorization-plugins: (from flag: asdf, from file: foobar)")
}
func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) {
config := map[string]interface{}{"hosts": []string{"qwer"}}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
var hosts []string
flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to")
assert.Check(t, flags.Set("host", "tcp://127.0.0.1:4444"))
assert.Check(t, flags.Set("host", "unix:///var/run/docker.sock"))
testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts")
}
func TestDaemonConfigurationMergeConflicts(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{"debug": true}`))
f.Close()
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.Bool("debug", false, "")
flags.Set("debug", "false")
_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
if err == nil {
t.Fatal("expected error, got nil")
}
if !strings.Contains(err.Error(), "debug") {
t.Fatalf("expected debug conflict, got %v", err)
}
}
func TestDaemonConfigurationMergeConcurrent(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{"max-concurrent-downloads": 1}`))
f.Close()
_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
if err != nil {
t.Fatal("expected error, got nil")
}
}
func TestDaemonConfigurationMergeConcurrentError(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{"max-concurrent-downloads": -1}`))
f.Close()
_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
if err == nil {
t.Fatalf("expected no error, got error %v", err)
}
}
func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`))
f.Close()
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("tlscacert", "", "")
flags.Set("tlscacert", "~/.docker/ca.pem")
_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
if err == nil {
t.Fatal("expected error, got nil")
}
if !strings.Contains(err.Error(), "tlscacert") {
t.Fatalf("expected tlscacert conflict, got %v", err)
}
}
func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) {
config := map[string]interface{}{"tls-verify": "true"}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.Bool("tlsverify", false, "")
err := findConfigurationConflicts(config, flags)
if err == nil {
t.Fatal("expected error, got nil")
}
if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") {
t.Fatalf("expected tls-verify conflict, got %v", err)
}
}
func TestFindConfigurationConflictsWithMergedValues(t *testing.T) {
var hosts []string
config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"}
flags := pflag.NewFlagSet("base", pflag.ContinueOnError)
flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "")
err := findConfigurationConflicts(config, flags)
if err != nil {
t.Fatal(err)
}
flags.Set("host", "unix:///var/run/docker.sock")
err = findConfigurationConflicts(config, flags)
if err == nil {
t.Fatal("expected error, got nil")
}
if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") {
t.Fatalf("expected hosts conflict, got %v", err)
}
}
func TestValidateConfigurationErrors(t *testing.T) {
minusNumber := -10
testCases := []struct {
config *Config
}{
{
config: &Config{
CommonConfig: CommonConfig{
Labels: []string{"one"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
Labels: []string{"foo=bar", "one"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"1.1.1.1o"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"2.2.2.2", "1.1.1.1o"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"123456"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"a.b.c", "123456"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
MaxConcurrentDownloads: &minusNumber,
// This is weird...
ValuesSet: map[string]interface{}{
"max-concurrent-downloads": -1,
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
MaxConcurrentUploads: &minusNumber,
// This is weird...
ValuesSet: map[string]interface{}{
"max-concurrent-uploads": -1,
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
NodeGenericResources: []string{"foo"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
NodeGenericResources: []string{"foo=bar", "foo=1"},
},
},
},
}
for _, tc := range testCases {
err := Validate(tc.config)
if err == nil {
t.Fatalf("expected error, got nil for config %v", tc.config)
}
}
}
func TestValidateConfiguration(t *testing.T) {
minusNumber := 4
testCases := []struct {
config *Config
}{
{
config: &Config{
CommonConfig: CommonConfig{
Labels: []string{"one=two"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"1.1.1.1"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"a.b.c"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
MaxConcurrentDownloads: &minusNumber,
// This is weird...
ValuesSet: map[string]interface{}{
"max-concurrent-downloads": -1,
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
MaxConcurrentUploads: &minusNumber,
// This is weird...
ValuesSet: map[string]interface{}{
"max-concurrent-uploads": -1,
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
NodeGenericResources: []string{"foo=bar", "foo=baz"},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
NodeGenericResources: []string{"foo=1"},
},
},
},
}
for _, tc := range testCases {
err := Validate(tc.config)
if err != nil {
t.Fatalf("expected no error, got error %v", err)
}
}
}
func TestModifiedDiscoverySettings(t *testing.T) {
cases := []struct {
current *Config
modified *Config
expected bool
}{
{
current: discoveryConfig("foo", "bar", map[string]string{}),
modified: discoveryConfig("foo", "bar", map[string]string{}),
expected: false,
},
{
current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
expected: false,
},
{
current: discoveryConfig("foo", "bar", map[string]string{}),
modified: discoveryConfig("foo", "bar", nil),
expected: false,
},
{
current: discoveryConfig("foo", "bar", nil),
modified: discoveryConfig("foo", "bar", map[string]string{}),
expected: false,
},
{
current: discoveryConfig("foo", "bar", nil),
modified: discoveryConfig("baz", "bar", nil),
expected: true,
},
{
current: discoveryConfig("foo", "bar", nil),
modified: discoveryConfig("foo", "baz", nil),
expected: true,
},
{
current: discoveryConfig("foo", "bar", nil),
modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
expected: true,
},
}
for _, c := range cases {
got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts)
if c.expected != got {
t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified)
}
}
}
func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config {
return &Config{
CommonConfig: CommonConfig{
ClusterStore: backendAddr,
ClusterAdvertise: advertiseAddr,
ClusterOpts: opts,
},
}
}
// TestReloadSetConfigFileNotExist tests that when `--config-file` is set
// and it doesn't exist, the `Reload` function returns an error.
func TestReloadSetConfigFileNotExist(t *testing.T) {
configFile := "/tmp/blabla/not/exists/config.json"
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("config-file", "", "")
flags.Set("config-file", configFile)
err := Reload(configFile, flags, func(c *Config) {})
assert.Check(t, is.ErrorContains(err, ""))
testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file")
}
// TestReloadDefaultConfigNotExist tests that if the default configuration file
// doesn't exist, the daemon is still reloaded.
func TestReloadDefaultConfigNotExist(t *testing.T) {
reloaded := false
configFile := "/etc/docker/daemon.json"
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("config-file", configFile, "")
err := Reload(configFile, flags, func(c *Config) {
reloaded = true
})
assert.Check(t, err)
assert.Check(t, reloaded)
}
// TestReloadBadDefaultConfig tests that when `--config-file` is not set
// and the default configuration file exists but is invalid, Reload returns an error
func TestReloadBadDefaultConfig(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`{wrong: "configuration"}`))
f.Close()
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("config-file", configFile, "")
err = Reload(configFile, flags, func(c *Config) {})
assert.Check(t, is.ErrorContains(err, ""))
testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file")
}
func TestReloadWithConflictingLabels(t *testing.T) {
tempFile := fs.NewFile(t, "config", fs.WithContent(`{"labels":["foo=bar","foo=baz"]}`))
defer tempFile.Remove()
configFile := tempFile.Path()
var lbls []string
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("config-file", configFile, "")
flags.StringSlice("labels", lbls, "")
err := Reload(configFile, flags, func(c *Config) {})
testutil.ErrorContains(t, err, "conflict labels for foo=baz and foo=bar")
}
func TestReloadWithDuplicateLabels(t *testing.T) {
tempFile := fs.NewFile(t, "config", fs.WithContent(`{"labels":["foo=the-same","foo=the-same"]}`))
defer tempFile.Remove()
configFile := tempFile.Path()
var lbls []string
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
flags.String("config-file", configFile, "")
flags.StringSlice("labels", lbls, "")
err := Reload(configFile, flags, func(c *Config) {})
assert.Check(t, err)
}

View file

@@ -0,0 +1,88 @@
// +build linux freebsd

package config // import "github.com/docker/docker/daemon/config"
import (
"fmt"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/opts"
units "github.com/docker/go-units"
)
const (
// DefaultIpcMode is default for container's IpcMode, if not set otherwise
DefaultIpcMode = "shareable" // TODO: change to private
)
// Config defines the configuration of a docker daemon.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line uses.
type Config struct {
CommonConfig
// These fields are common to all unix platforms.
CommonUnixConfig
// Fields below here are platform specific.
CgroupParent string `json:"cgroup-parent,omitempty"`
EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"`
RemappedRoot string `json:"userns-remap,omitempty"`
Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"`
CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"`
CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"`
OOMScoreAdjust int `json:"oom-score-adjust,omitempty"`
Init bool `json:"init,omitempty"`
InitPath string `json:"init-path,omitempty"`
SeccompProfile string `json:"seccomp-profile,omitempty"`
ShmSize opts.MemBytes `json:"default-shm-size,omitempty"`
NoNewPrivileges bool `json:"no-new-privileges,omitempty"`
IpcMode string `json:"default-ipc-mode,omitempty"`
}
// BridgeConfig stores all the bridge driver specific
// configuration.
type BridgeConfig struct {
commonBridgeConfig
// These fields are common to all unix platforms.
commonUnixBridgeConfig
// Fields below here are platform specific.
EnableIPv6 bool `json:"ipv6,omitempty"`
EnableIPTables bool `json:"iptables,omitempty"`
EnableIPForward bool `json:"ip-forward,omitempty"`
EnableIPMasq bool `json:"ip-masq,omitempty"`
EnableUserlandProxy bool `json:"userland-proxy,omitempty"`
UserlandProxyPath string `json:"userland-proxy-path,omitempty"`
FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"`
}
// IsSwarmCompatible defines if swarm mode can be enabled in this config
func (conf *Config) IsSwarmCompatible() error {
if conf.ClusterStore != "" || conf.ClusterAdvertise != "" {
return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
}
if conf.LiveRestoreEnabled {
return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode")
}
return nil
}
func verifyDefaultIpcMode(mode string) error {
const hint = "Use \"shareable\" or \"private\"."
dm := containertypes.IpcMode(mode)
if !dm.Valid() {
return fmt.Errorf("Default IPC mode setting (%v) is invalid. "+hint, dm)
}
if dm != "" && !dm.IsPrivate() && !dm.IsShareable() {
return fmt.Errorf("IPC mode \"%v\" is not supported as default value. "+hint, dm)
}
return nil
}
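A short sketch of what this check accepts: only "shareable" and "private" (or an empty value) are valid defaults, so a per-container mode such as "host" is rejected here. The helper name is made up.

// exampleVerifyDefaultIpcMode is an illustrative sketch; the function name is made up.
func exampleVerifyDefaultIpcMode() {
    _ = verifyDefaultIpcMode("shareable") // nil
    _ = verifyDefaultIpcMode("private")   // nil
    _ = verifyDefaultIpcMode("host")      // error: not supported as a default value
}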
// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid.
func (conf *Config) ValidatePlatformConfig() error {
return verifyDefaultIpcMode(conf.IpcMode)
}

View file

@@ -0,0 +1,134 @@
// +build !windows

package config // import "github.com/docker/docker/daemon/config"
import (
"testing"
"github.com/docker/docker/opts"
units "github.com/docker/go-units"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
"github.com/gotestyourself/gotestyourself/fs"
"github.com/spf13/pflag"
)
func TestGetConflictFreeConfiguration(t *testing.T) {
configFileData := `
{
"debug": true,
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 2048,
"Soft": 1024
}
},
"log-opts": {
"tag": "test_tag"
}
}`
file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
defer file.Remove()
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
var debug bool
flags.BoolVarP(&debug, "debug", "D", false, "")
flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "")
flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "")
cc, err := getConflictFreeConfiguration(file.Path(), flags)
assert.NilError(t, err)
assert.Check(t, cc.Debug)
expectedUlimits := map[string]*units.Ulimit{
"nofile": {
Name: "nofile",
Hard: 2048,
Soft: 1024,
},
}
assert.Check(t, is.DeepEqual(expectedUlimits, cc.Ulimits))
}
func TestDaemonConfigurationMerge(t *testing.T) {
configFileData := `
{
"debug": true,
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 2048,
"Soft": 1024
}
},
"log-opts": {
"tag": "test_tag"
}
}`
file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
defer file.Remove()
c := &Config{
CommonConfig: CommonConfig{
AutoRestart: true,
LogConfig: LogConfig{
Type: "syslog",
Config: map[string]string{"tag": "test"},
},
},
}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
var debug bool
flags.BoolVarP(&debug, "debug", "D", false, "")
flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "")
flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "")
cc, err := MergeDaemonConfigurations(c, flags, file.Path())
assert.NilError(t, err)
assert.Check(t, cc.Debug)
assert.Check(t, cc.AutoRestart)
expectedLogConfig := LogConfig{
Type: "syslog",
Config: map[string]string{"tag": "test_tag"},
}
assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig))
expectedUlimits := map[string]*units.Ulimit{
"nofile": {
Name: "nofile",
Hard: 2048,
Soft: 1024,
},
}
assert.Check(t, is.DeepEqual(expectedUlimits, cc.Ulimits))
}
func TestDaemonConfigurationMergeShmSize(t *testing.T) {
data := `{"default-shm-size": "1g"}`
file := fs.NewFile(t, "docker-config", fs.WithContent(data))
defer file.Remove()
c := &Config{}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
shmSize := opts.MemBytes(DefaultShmSize)
flags.Var(&shmSize, "default-shm-size", "")
cc, err := MergeDaemonConfigurations(c, flags, file.Path())
assert.NilError(t, err)
expectedValue := 1 * 1024 * 1024 * 1024
assert.Check(t, is.Equal(int64(expectedValue), cc.ShmSize.Value()))
}

View file

@@ -0,0 +1,57 @@
package config // import "github.com/docker/docker/daemon/config"
import (
"github.com/docker/docker/api/types"
)
// BridgeConfig stores all the bridge driver specific
// configuration.
type BridgeConfig struct {
commonBridgeConfig
}
// Config defines the configuration of a docker daemon.
// These are the configuration settings that you pass
// to the docker daemon when you launch it, for example: `dockerd -e windows`
type Config struct {
CommonConfig
// Fields below here are platform specific. (There are none presently
// for the Windows daemon.)
}
// GetRuntime returns the runtime path and arguments for a given
// runtime name
func (conf *Config) GetRuntime(name string) *types.Runtime {
return nil
}
// GetInitPath returns the configured docker-init path
func (conf *Config) GetInitPath() string {
return ""
}
// GetDefaultRuntimeName returns the current default runtime
func (conf *Config) GetDefaultRuntimeName() string {
return StockRuntimeName
}
// GetAllRuntimes returns a copy of the runtimes map
func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
return map[string]types.Runtime{}
}
// GetExecRoot returns the user configured Exec-root
func (conf *Config) GetExecRoot() string {
return ""
}
// IsSwarmCompatible defines if swarm mode can be enabled in this config
func (conf *Config) IsSwarmCompatible() error {
return nil
}
// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid.
func (conf *Config) ValidatePlatformConfig() error {
return nil
}

View file

@@ -0,0 +1,60 @@
// +build windows

package config // import "github.com/docker/docker/daemon/config"
import (
"io/ioutil"
"testing"
"github.com/docker/docker/opts"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
"github.com/spf13/pflag"
)
func TestDaemonConfigurationMerge(t *testing.T) {
f, err := ioutil.TempFile("", "docker-config-")
if err != nil {
t.Fatal(err)
}
configFile := f.Name()
f.Write([]byte(`
{
"debug": true,
"log-opts": {
"tag": "test_tag"
}
}`))
f.Close()
c := &Config{
CommonConfig: CommonConfig{
AutoRestart: true,
LogConfig: LogConfig{
Type: "syslog",
Config: map[string]string{"tag": "test"},
},
},
}
flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
var debug bool
flags.BoolVarP(&debug, "debug", "D", false, "")
flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "")
cc, err := MergeDaemonConfigurations(c, flags, configFile)
assert.NilError(t, err)
assert.Check(t, cc.Debug)
assert.Check(t, cc.AutoRestart)
expectedLogConfig := LogConfig{
Type: "syslog",
Config: map[string]string{"tag": "test_tag"},
}
assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig))
}

View file

@@ -0,0 +1,22 @@
package config // import "github.com/docker/docker/daemon/config"
import (
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/swarmkit/api/genericresource"
)
// ParseGenericResources parses and validates the specified string as a list of GenericResource
func ParseGenericResources(value []string) ([]swarm.GenericResource, error) {
if len(value) == 0 {
return nil, nil
}
resources, err := genericresource.Parse(value)
if err != nil {
return nil, err
}
obj := convert.GenericResourcesFromGRPC(resources)
return obj, nil
}
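A minimal sketch using the value formats already shown for NodeGenericResources (discrete name=integer, named name=string); the helper name is made up.

// exampleParseGenericResources is an illustrative sketch; the function name is made up.
func exampleParseGenericResources() {
    res, err := ParseGenericResources([]string{"apple=3", "orange=blue"})
    // on success res holds one swarm.GenericResource per entry
    _, _ = res, err

    res, err = ParseGenericResources(nil) // nil, nil: empty input is a no-op
    _, _ = res, err
}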

View file

@@ -0,0 +1,21 @@
package daemon // import "github.com/docker/docker/daemon"
import (
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/sirupsen/logrus"
)
// SetContainerConfigReferences sets the config references needed by the named container
func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error {
if !configsSupported() && len(refs) > 0 {
logrus.Warn("configs are not supported on this platform")
return nil
}
c, err := daemon.GetContainer(name)
if err != nil {
return err
}
c.ConfigReferences = append(c.ConfigReferences, refs...)
return nil
}

View file

@@ -0,0 +1,5 @@
package daemon // import "github.com/docker/docker/daemon"
func configsSupported() bool {
return true
}

View file

@@ -0,0 +1,7 @@
// +build !linux,!windows

package daemon // import "github.com/docker/docker/daemon"
func configsSupported() bool {
return false
}

View file

@@ -0,0 +1,5 @@
package daemon // import "github.com/docker/docker/daemon"
func configsSupported() bool {
return true
}

View file

@@ -0,0 +1,358 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/go-connections/nat"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
// GetContainer looks for a container using the provided information, which could be
// one of the following inputs from the caller:
// - A full container ID, which exactly matches a container in the daemon's list
// - A container name, which only matches exactly via the GetByName() function
// - A partial container ID prefix (e.g. short ID) of any length that is
// unique enough to only return a single container object
// If none of these searches succeed, an error is returned
func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
if len(prefixOrName) == 0 {
return nil, errors.WithStack(invalidIdentifier(prefixOrName))
}
if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
// prefix is an exact match to a full container ID
return containerByID, nil
}
// GetByName will match only an exact name provided; we ignore errors
if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
// prefix is an exact match to a full container Name
return containerByName, nil
}
containerID, indexError := daemon.idIndex.Get(prefixOrName)
if indexError != nil {
// When truncindex defines an error type, use that instead
if indexError == truncindex.ErrNotExist {
return nil, containerNotFound(prefixOrName)
}
return nil, errdefs.System(indexError)
}
return daemon.containers.Get(containerID), nil
}
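A sketch of the lookup order described above; the daemon value, IDs, and names are hypothetical and the helper name is made up.

// exampleGetContainer is an illustrative sketch; d, the IDs, and the names are hypothetical.
func exampleGetContainer(d *Daemon) {
    byID, _ := d.GetContainer("4fa6e0f0c678")   // full ID: exact match in the container store
    byName, _ := d.GetContainer("my-container") // exact name match via GetByName
    byPrefix, _ := d.GetContainer("4fa6")       // unique short-ID prefix via the trunc index
    _, err := d.GetContainer("")                // empty input: invalid identifier error
    _, _, _, _ = byID, byName, byPrefix, err
}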
// checkContainer makes sure the specified container satisfies the specified conditions
func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error {
for _, condition := range conditions {
if err := condition(container); err != nil {
return err
}
}
return nil
}
// Exists returns true if a container with the specified ID or name exists,
// false otherwise.
func (daemon *Daemon) Exists(id string) bool {
c, _ := daemon.GetContainer(id)
return c != nil
}
// IsPaused returns a bool indicating if the specified container is paused.
func (daemon *Daemon) IsPaused(id string) bool {
c, _ := daemon.GetContainer(id)
return c.State.IsPaused()
}
func (daemon *Daemon) containerRoot(id string) string {
return filepath.Join(daemon.repository, id)
}
// load reads the contents of a container from disk.
// This is typically done at startup.
func (daemon *Daemon) load(id string) (*container.Container, error) {
container := daemon.newBaseContainer(id)
if err := container.FromDisk(); err != nil {
return nil, err
}
if err := label.ReserveLabel(container.ProcessLabel); err != nil {
return nil, err
}
if container.ID != id {
return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
}
return container, nil
}
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(c *container.Container) error {
// Attach to stdout and stderr
if c.Config.OpenStdin {
c.StreamConfig.NewInputPipes()
} else {
c.StreamConfig.NewNopInputPipe()
}
// once in the memory store it is visible to other goroutines
// grab a Lock until it has been checkpointed to avoid races
c.Lock()
defer c.Unlock()
daemon.containers.Add(c.ID, c)
daemon.idIndex.Add(c.ID)
return c.CheckpointTo(daemon.containersReplica)
}
func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
var (
id string
err error
noExplicitName = name == ""
)
id, name, err = daemon.generateIDAndName(name)
if err != nil {
return nil, err
}
if hostConfig.NetworkMode.IsHost() {
if config.Hostname == "" {
config.Hostname, err = os.Hostname()
if err != nil {
return nil, errdefs.System(err)
}
}
} else {
daemon.generateHostname(id, config)
}
entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
base := daemon.newBaseContainer(id)
base.Created = time.Now().UTC()
base.Managed = managed
base.Path = entrypoint
base.Args = args //FIXME: de-duplicate from config
base.Config = config
base.HostConfig = &containertypes.HostConfig{}
base.ImageID = imgID
base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
base.Name = name
base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem)
base.OS = operatingSystem
return base, err
}
// GetByName returns a container given a name.
func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
if len(name) == 0 {
return nil, fmt.Errorf("No container name supplied")
}
fullName := name
if name[0] != '/' {
fullName = "/" + name
}
id, err := daemon.containersReplica.Snapshot().GetID(fullName)
if err != nil {
return nil, fmt.Errorf("Could not find entity for %s", name)
}
e := daemon.containers.Get(id)
if e == nil {
return nil, fmt.Errorf("Could not find container for entity id %s", id)
}
return e, nil
}
// newBaseContainer creates a new container with its initial
// configuration based on the root storage from the daemon.
func (daemon *Daemon) newBaseContainer(id string) *container.Container {
return container.NewBaseContainer(id, daemon.containerRoot(id))
}
func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) {
if len(configEntrypoint) != 0 {
return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
}
return configCmd[0], configCmd[1:]
}
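A small sketch of how entrypoint and cmd combine (the helper name is made up): a non-empty entrypoint supplies the path and prepends its remaining elements to cmd, otherwise cmd alone is split into path and args.

// exampleEntrypointAndArgs is an illustrative sketch; the function name is made up.
func exampleEntrypointAndArgs(d *Daemon) {
    path, args := d.getEntrypointAndArgs(strslice.StrSlice{"/bin/sh", "-c"}, strslice.StrSlice{"echo", "hi"})
    // path == "/bin/sh", args == ["-c", "echo", "hi"]
    path, args = d.getEntrypointAndArgs(nil, strslice.StrSlice{"top"})
    // path == "top", args == []
    _, _ = path, args
}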
func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) {
// Generate default hostname
if config.Hostname == "" {
config.Hostname = id[:12]
}
}
func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.Lock()
defer container.Unlock()
return daemon.parseSecurityOpt(container, hostConfig)
}
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
// Do not lock while creating volumes since this could be calling out to external plugins
// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
return err
}
container.Lock()
defer container.Unlock()
// Register any links from the host config before starting the container
if err := daemon.registerLinks(container, hostConfig); err != nil {
return err
}
runconfig.SetDefaultNetModeIfBlank(hostConfig)
container.HostConfig = hostConfig
return container.CheckpointTo(daemon.containersReplica)
}
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(platform string, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
// First perform verification of settings common across all platforms.
if config != nil {
if config.WorkingDir != "" {
wdInvalid := false
if runtime.GOOS == platform {
config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
if !system.IsAbs(config.WorkingDir) {
wdInvalid = true
}
} else {
// LCOW. Force Unix semantics
config.WorkingDir = strings.Replace(config.WorkingDir, string(os.PathSeparator), "/", -1)
if !path.IsAbs(config.WorkingDir) {
wdInvalid = true
}
}
if wdInvalid {
return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir)
}
}
if len(config.StopSignal) > 0 {
_, err := signal.ParseSignal(config.StopSignal)
if err != nil {
return nil, err
}
}
// Validate if Env contains empty variable or not (e.g., ``, `=foo`)
for _, env := range config.Env {
if _, err := opts.ValidateEnv(env); err != nil {
return nil, err
}
}
// Validate the healthcheck params of Config
if config.Healthcheck != nil {
if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration {
return nil, errors.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration)
}
if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration {
return nil, errors.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration)
}
if config.Healthcheck.Retries < 0 {
return nil, errors.Errorf("Retries in Healthcheck cannot be negative")
}
if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration {
return nil, errors.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration)
}
}
}
if hostConfig == nil {
return nil, nil
}
if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
return nil, errors.Errorf("can't create 'AutoRemove' container with restart policy")
}
// Validate mounts; check if host directories still exist
parser := volume.NewParser(platform)
for _, cfg := range hostConfig.Mounts {
if err := parser.ValidateMountConfig(&cfg); err != nil {
return nil, err
}
}
for _, extraHost := range hostConfig.ExtraHosts {
if _, err := opts.ValidateExtraHost(extraHost); err != nil {
return nil, err
}
}
for port := range hostConfig.PortBindings {
_, portStr := nat.SplitProtoPort(string(port))
if _, err := nat.ParsePort(portStr); err != nil {
return nil, errors.Errorf("invalid port specification: %q", portStr)
}
for _, pb := range hostConfig.PortBindings[port] {
_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
if err != nil {
return nil, errors.Errorf("invalid port specification: %q", pb.HostPort)
}
}
}
p := hostConfig.RestartPolicy
switch p.Name {
case "always", "unless-stopped", "no":
if p.MaximumRetryCount != 0 {
return nil, errors.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name)
}
case "on-failure":
if p.MaximumRetryCount < 0 {
return nil, errors.Errorf("maximum retry count cannot be negative")
}
case "":
// do nothing
default:
return nil, errors.Errorf("invalid restart policy '%s'", p.Name)
}
if !hostConfig.Isolation.IsValid() {
return nil, errors.Errorf("invalid isolation '%s' on %s", hostConfig.Isolation, runtime.GOOS)
}
var (
err error
warnings []string
)
// Now do platform-specific verification
if warnings, err = verifyPlatformContainerSettings(daemon, hostConfig, config, update); err != nil {
return warnings, err
}
if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 {
warnings = append(warnings, "Published ports are discarded when using host network mode")
}
return warnings, err
}

View file

@ -0,0 +1,30 @@
//+build !windows

package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
)
func (daemon *Daemon) saveApparmorConfig(container *container.Container) error {
container.AppArmorProfile = "" // we don't care about the previous value.
if !daemon.apparmorEnabled {
return nil // if apparmor is disabled there is nothing to do here.
}
if err := parseSecurityOpt(container, container.HostConfig); err != nil {
return errdefs.InvalidParameter(err)
}
if !container.HostConfig.Privileged {
if container.AppArmorProfile == "" {
container.AppArmorProfile = defaultApparmorProfile
}
} else {
container.AppArmorProfile = "unconfined"
}
return nil
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,414 @@
// +build linux freebsd

package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/links"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
"github.com/docker/libnetwork"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
var env []string
children := daemon.children(container)
bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil {
return nil, nil
}
for linkAlias, child := range children {
if !child.IsRunning() {
return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
}
childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil {
return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
}
link := links.NewLink(
bridgeSettings.IPAddress,
childBridgeSettings.IPAddress,
linkAlias,
child.Config.Env,
child.Config.ExposedPorts,
)
env = append(env, link.ToEnv()...)
}
return env, nil
}
func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) {
errMsg := "can't join IPC of container " + id
// Check the container exists
container, err := daemon.GetContainer(id)
if err != nil {
return nil, errors.Wrap(err, errMsg)
}
// Check the container is running and not restarting
if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil {
return nil, errors.Wrap(err, errMsg)
}
// Check the container ipc is shareable
if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() {
if err == nil || os.IsNotExist(err) {
return nil, errors.New(errMsg + ": non-shareable IPC")
}
// stat() failed?
return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath)
}
return container, nil
}
func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) {
containerID := container.HostConfig.PidMode.Container()
container, err := daemon.GetContainer(containerID)
if err != nil {
return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID)
}
return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
}
func containerIsRunning(c *container.Container) error {
if !c.IsRunning() {
return errdefs.Conflict(errors.Errorf("container %s is not running", c.ID))
}
return nil
}
func containerIsNotRestarting(c *container.Container) error {
if c.IsRestarting() {
return errContainerIsRestarting(c.ID)
}
return nil
}
func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
ipcMode := c.HostConfig.IpcMode
switch {
case ipcMode.IsContainer():
ic, err := daemon.getIpcContainer(ipcMode.Container())
if err != nil {
return err
}
c.ShmPath = ic.ShmPath
case ipcMode.IsHost():
if _, err := os.Stat("/dev/shm"); err != nil {
return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
}
c.ShmPath = "/dev/shm"
case ipcMode.IsPrivate(), ipcMode.IsNone():
// c.ShmPath will/should not be used, so make it empty.
// Container's /dev/shm mount comes from OCI spec.
c.ShmPath = ""
case ipcMode.IsEmpty():
// A container was created by an older version of the daemon.
// The default behavior used to be what is now called "shareable".
fallthrough
case ipcMode.IsShareable():
rootIDs := daemon.idMappings.RootPair()
if !c.HasMountFor("/dev/shm") {
shmPath, err := c.ShmResourcePath()
if err != nil {
return err
}
if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil {
return err
}
shmproperty := "mode=1777,size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10)
if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
return fmt.Errorf("mounting shm tmpfs: %s", err)
}
if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil {
return err
}
c.ShmPath = shmPath
}
default:
return fmt.Errorf("invalid IPC mode: %v", ipcMode)
}
return nil
}
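To summarize the branches above, roughly how each IPC mode ends up setting ShmPath:

// container:<id>  -> the ShmPath of the joined container
// host            -> "/dev/shm" (which must already be mounted on the host)
// private, none   -> "" (the container's /dev/shm mount comes from the OCI spec)
// shareable or "" -> a fresh tmpfs under the container root, sized from HostConfig.ShmSize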
func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if len(c.SecretReferences) == 0 && len(c.ConfigReferences) == 0 {
return nil
}
if err := daemon.createSecretsDir(c); err != nil {
return err
}
defer func() {
if setupErr != nil {
daemon.cleanupSecretDir(c)
}
}()
if c.DependencyStore == nil {
return fmt.Errorf("secret store is not initialized")
}
// retrieve possible remapped range start for root UID, GID
rootIDs := daemon.idMappings.RootPair()
for _, s := range c.SecretReferences {
// TODO (ehazlett): use type switch when more are supported
if s.File == nil {
logrus.Error("secret target type is not a file target")
continue
}
// secrets are created in the SecretMountPath on the host, at a
// single level
fPath, err := c.SecretFilePath(*s)
if err != nil {
return errors.Wrap(err, "error getting secret file path")
}
if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil {
return errors.Wrap(err, "error creating secret mount path")
}
logrus.WithFields(logrus.Fields{
"name": s.File.Name,
"path": fPath,
}).Debug("injecting secret")
secret, err := c.DependencyStore.Secrets().Get(s.SecretID)
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
}
if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
return errors.Wrap(err, "error injecting secret")
}
uid, err := strconv.Atoi(s.File.UID)
if err != nil {
return err
}
gid, err := strconv.Atoi(s.File.GID)
if err != nil {
return err
}
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
return errors.Wrap(err, "error setting ownership for secret")
}
if err := os.Chmod(fPath, s.File.Mode); err != nil {
return errors.Wrap(err, "error setting file mode for secret")
}
}
for _, ref := range c.ConfigReferences {
// TODO (ehazlett): use type switch when more are supported
if ref.File == nil {
logrus.Error("config target type is not a file target")
continue
}
fPath, err := c.ConfigFilePath(*ref)
if err != nil {
return errors.Wrap(err, "error getting config file path for container")
}
if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil {
return errors.Wrap(err, "error creating config mount path")
}
logrus.WithFields(logrus.Fields{
"name": ref.File.Name,
"path": fPath,
}).Debug("injecting config")
config, err := c.DependencyStore.Configs().Get(ref.ConfigID)
if err != nil {
return errors.Wrap(err, "unable to get config from config store")
}
if err := ioutil.WriteFile(fPath, config.Spec.Data, ref.File.Mode); err != nil {
return errors.Wrap(err, "error injecting config")
}
uid, err := strconv.Atoi(ref.File.UID)
if err != nil {
return err
}
gid, err := strconv.Atoi(ref.File.GID)
if err != nil {
return err
}
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
return errors.Wrap(err, "error setting ownership for config")
}
if err := os.Chmod(fPath, ref.File.Mode); err != nil {
return errors.Wrap(err, "error setting file mode for config")
}
}
return daemon.remountSecretDir(c)
}
// createSecretsDir is used to create a dir suitable for storing container secrets.
// In practice this is using a tmpfs mount and is used for both "configs" and "secrets"
func (daemon *Daemon) createSecretsDir(c *container.Container) error {
// retrieve possible remapped range start for root UID, GID
rootIDs := daemon.idMappings.RootPair()
dir, err := c.SecretMountPath()
if err != nil {
return errors.Wrap(err, "error getting container secrets dir")
}
// create tmpfs
if err := idtools.MkdirAllAndChown(dir, 0700, rootIDs); err != nil {
return errors.Wrap(err, "error creating secret local mount path")
}
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID)
if err := mount.Mount("tmpfs", dir, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil {
return errors.Wrap(err, "unable to setup secret mount")
}
return nil
}
func (daemon *Daemon) remountSecretDir(c *container.Container) error {
dir, err := c.SecretMountPath()
if err != nil {
return errors.Wrap(err, "error getting container secrets path")
}
if err := label.Relabel(dir, c.MountLabel, false); err != nil {
logrus.WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label")
}
rootIDs := daemon.idMappings.RootPair()
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID)
// remount secrets ro
if err := mount.Mount("tmpfs", dir, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil {
return errors.Wrap(err, "unable to remount dir as readonly")
}
return nil
}
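// cleanupSecretDir unmounts and removes the container's secrets directory.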
func (daemon *Daemon) cleanupSecretDir(c *container.Container) {
dir, err := c.SecretMountPath()
if err != nil {
logrus.WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container")
}
if err := mount.RecursiveUnmount(dir); err != nil {
logrus.WithField("dir", dir).WithError(err).Warn("Error while attmepting to unmount dir, this may prevent removal of container.")
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
logrus.WithField("dir", dir).WithError(err).Error("Error removing dir.")
}
}
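// killProcessDirectly waits up to 10 seconds for the container to stop; if it is
// still running after that, its process is sent SIGKILL directly.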
func killProcessDirectly(cntr *container.Container) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Block until the container stops or the timeout expires.
status := <-cntr.Wait(ctx, container.WaitConditionNotRunning)
if status.Err() != nil {
// Ensure that we don't kill ourselves
if pid := cntr.GetPID(); pid != 0 {
logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
if err := unix.Kill(pid, 9); err != nil {
if err != unix.ESRCH {
return err
}
e := errNoSuchProcess{pid, 9}
logrus.Debug(e)
return e
}
}
}
return nil
}
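// detachMounted lazily unmounts the given path (MNT_DETACH).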
func detachMounted(path string) error {
return unix.Unmount(path, unix.MNT_DETACH)
}
func isLinkable(child *container.Container) bool {
// A container is linkable only if it belongs to the default network
_, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
return ok
}
func enableIPOnPredefinedNetwork() bool {
return false
}
func (daemon *Daemon) isNetworkHotPluggable() bool {
return true
}
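// setupPathsAndSandboxOptions resolves the container's hosts and resolv.conf paths
// under the container root and registers them as libnetwork sandbox options.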
func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
var err error
container.HostsPath, err = container.GetRootResourcePath("hosts")
if err != nil {
return err
}
*sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath))
container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
if err != nil {
return err
}
*sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath))
return nil
}
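// initializeNetworkingPaths copies the hostname, hosts, and resolv.conf paths from
// the container whose network namespace is being shared (nc).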
func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
container.HostnamePath = nc.HostnamePath
container.HostsPath = nc.HostsPath
container.ResolvConfPath = nc.ResolvConfPath
return nil
}
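// setupContainerMountsRoot creates the container's mounts root directory and
// attempts to mark it unbindable so that it is not leaked into other mount namespaces.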
func (daemon *Daemon) setupContainerMountsRoot(c *container.Container) error {
// get the root mount path so we can make it unbindable
p, err := c.MountsResourcePath("")
if err != nil {
return err
}
if err := idtools.MkdirAllAndChown(p, 0700, daemon.idMappings.RootPair()); err != nil {
return err
}
if err := mount.MakeUnbindable(p); err != nil {
// Setting unbindable is a precaution and is not necessary for correct operation.
// Do not error out if this fails.
logrus.WithError(err).WithField("resource", p).WithField("container", c.ID).Warn("Error setting container resource mounts to unbindable, this may cause mount leakages, preventing removal of this container.")
}
return nil
}

View file

@ -0,0 +1,201 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"io/ioutil"
"os"
"github.com/docker/docker/container"
"github.com/docker/docker/pkg/system"
"github.com/docker/libnetwork"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
return nil, nil
}
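// setupConfigDir creates the local config directory for the container on Windows
// and injects each referenced config file into it.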
func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
if len(c.ConfigReferences) == 0 {
return nil
}
localPath := c.ConfigsDirPath()
logrus.Debugf("configs: setting up config dir: %s", localPath)
// create local config root
if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil {
return errors.Wrap(err, "error creating config dir")
}
defer func() {
if setupErr != nil {
if err := os.RemoveAll(localPath); err != nil {
logrus.Errorf("error cleaning up config dir: %s", err)
}
}
}()
if c.DependencyStore == nil {
return fmt.Errorf("config store is not initialized")
}
for _, configRef := range c.ConfigReferences {
// TODO (ehazlett): use type switch when more are supported
if configRef.File == nil {
logrus.Error("config target type is not a file target")
continue
}
fPath := c.ConfigFilePath(*configRef)
log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath})
log.Debug("injecting config")
config, err := c.DependencyStore.Configs().Get(configRef.ConfigID)
if err != nil {
return errors.Wrap(err, "unable to get config from config store")
}
if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil {
return errors.Wrap(err, "error injecting config")
}
}
return nil
}
func (daemon *Daemon) setupIpcDirs(container *container.Container) error {
return nil
}
// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work
// against containers which have volumes. You will still be able to cp
// to somewhere on the container drive, but not to any mounted volumes
// inside the container. Without this fix, docker cp is broken to any
// container which has a volume, regardless of where the file is inside the
// container.
func (daemon *Daemon) mountVolumes(container *container.Container) error {
return nil
}
func detachMounted(path string) error {
return nil
}
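// setupSecretDir creates the local secrets directory for the container on Windows
// and injects each referenced secret file into it.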
func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if len(c.SecretReferences) == 0 {
return nil
}
localMountPath, err := c.SecretMountPath()
if err != nil {
return err
}
logrus.Debugf("secrets: setting up secret dir: %s", localMountPath)
// create local secret root
if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil {
return errors.Wrap(err, "error creating secret local directory")
}
defer func() {
if setupErr != nil {
if err := os.RemoveAll(localMountPath); err != nil {
logrus.Errorf("error cleaning up secret mount: %s", err)
}
}
}()
if c.DependencyStore == nil {
return fmt.Errorf("secret store is not initialized")
}
for _, s := range c.SecretReferences {
// TODO (ehazlett): use type switch when more are supported
if s.File == nil {
logrus.Error("secret target type is not a file target")
continue
}
// secrets are created in the SecretMountPath on the host, at a
// single level
fPath, err := c.SecretFilePath(*s)
if err != nil {
return err
}
logrus.WithFields(logrus.Fields{
"name": s.File.Name,
"path": fPath,
}).Debug("injecting secret")
secret, err := c.DependencyStore.Secrets().Get(s.SecretID)
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
}
if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
return errors.Wrap(err, "error injecting secret")
}
}
return nil
}
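// killProcessDirectly is a no-op on Windows.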
func killProcessDirectly(container *container.Container) error {
return nil
}
func isLinkable(child *container.Container) bool {
return false
}
func enableIPOnPredefinedNetwork() bool {
return true
}
func (daemon *Daemon) isNetworkHotPluggable() bool {
return true
}
func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
return nil
}
func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
if nc.HostConfig.Isolation.IsHyperV() {
return fmt.Errorf("sharing of hyperv containers network is not supported")
}
container.NetworkSharedContainerID = nc.ID
if nc.NetworkSettings != nil {
for n := range nc.NetworkSettings.Networks {
sn, err := daemon.FindNetwork(n)
if err != nil {
continue
}
ep, err := nc.GetEndpointInNetwork(sn)
if err != nil {
continue
}
data, err := ep.DriverInfo()
if err != nil {
continue
}
if data["GW_INFO"] != nil {
gwInfo := data["GW_INFO"].(map[string]interface{})
if gwInfo["hnsid"] != nil {
container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string))
}
}
if data["hnsid"] != nil {
container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string))
}
}
}
return nil
}

View file

@ -0,0 +1,44 @@
// +build linux freebsd
package daemon
import (
"testing"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/go-connections/nat"
"github.com/gotestyourself/gotestyourself/assert"
)
// TestContainerWarningHostAndPublishPorts checks that a warning is returned when setting network mode to host and specifying published ports.
// This should not be tested on Windows because Windows doesn't support "host" network mode.
func TestContainerWarningHostAndPublishPorts(t *testing.T) {
testCases := []struct {
ports nat.PortMap
warnings []string
}{
{ports: nat.PortMap{}},
{ports: nat.PortMap{
"8080": []nat.PortBinding{{HostPort: "8989"}},
}, warnings: []string{"Published ports are discarded when using host network mode"}},
}
for _, tc := range testCases {
hostConfig := &containertypes.HostConfig{
Runtime: "runc",
NetworkMode: "host",
PortBindings: tc.ports,
}
cs := &config.Config{
CommonUnixConfig: config.CommonUnixConfig{
Runtimes: map[string]types.Runtime{"runc": {}},
},
}
d := &Daemon{configStore: cs}
wrns, err := d.verifyContainerSettings("", hostConfig, &containertypes.Config{}, false)
assert.NilError(t, err)
assert.DeepEqual(t, tc.warnings, wrns)
}
}

View file

@ -0,0 +1,9 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
)
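// saveApparmorConfig is a no-op on platforms without AppArmor support.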
func (daemon *Daemon) saveApparmorConfig(container *container.Container) error {
return nil
}

View file

@ -0,0 +1,324 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net"
"runtime"
"strings"
"time"
"github.com/pkg/errors"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
)
// CreateManagedContainer creates a container that is managed by a Service
func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) {
return daemon.containerCreate(params, true)
}
// ContainerCreate creates a regular container
func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) {
return daemon.containerCreate(params, false)
}
func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) {
start := time.Now()
if params.Config == nil {
return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container"))
}
os := runtime.GOOS
if params.Config.Image != "" {
img, err := daemon.imageService.GetImage(params.Config.Image)
if err == nil {
os = img.OS
}
} else {
// This means scratch. On Windows, we can safely assume that this is a Linux
// container. On other platforms, it's the host OS (which it already is).
if runtime.GOOS == "windows" && system.LCOWSupported() {
os = "linux"
}
}
warnings, err := daemon.verifyContainerSettings(os, params.HostConfig, params.Config, false)
if err != nil {
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
}
err = verifyNetworkingConfig(params.NetworkingConfig)
if err != nil {
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
}
if params.HostConfig == nil {
params.HostConfig = &containertypes.HostConfig{}
}
err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares)
if err != nil {
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
}
container, err := daemon.create(params, managed)
if err != nil {
return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err
}
containerActions.WithValues("create").UpdateSince(start)
return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
var (
container *container.Container
img *image.Image
imgID image.ID
err error
)
os := runtime.GOOS
if params.Config.Image != "" {
img, err = daemon.imageService.GetImage(params.Config.Image)
if err != nil {
return nil, err
}
if img.OS != "" {
os = img.OS
} else {
// default to the host OS except on Windows with LCOW
if runtime.GOOS == "windows" && system.LCOWSupported() {
os = "linux"
}
}
imgID = img.ID()
if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() {
return nil, errors.New("operating system on which parent image was created is not Windows")
}
} else {
if runtime.GOOS == "windows" {
os = "linux" // 'scratch' case.
}
}
if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
return nil, errdefs.InvalidParameter(err)
}
if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil {
return nil, errdefs.InvalidParameter(err)
}
if container, err = daemon.newContainer(params.Name, os, params.Config, params.HostConfig, imgID, managed); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
if err := daemon.cleanupContainer(container, true, true); err != nil {
logrus.Errorf("failed to cleanup container on create error: %v", err)
}
}
}()
if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
return nil, err
}
container.HostConfig.StorageOpt = params.HostConfig.StorageOpt
// Fixes: https://github.com/moby/moby/issues/34074 and
// https://github.com/docker/for-win/issues/999.
// Merge the daemon's storage options if they aren't already present. We only
// do this on Windows as there's no effective sandbox size limit other than
// physical on Linux.
if runtime.GOOS == "windows" {
if container.HostConfig.StorageOpt == nil {
container.HostConfig.StorageOpt = make(map[string]string)
}
for _, v := range daemon.configStore.GraphOptions {
opt := strings.SplitN(v, "=", 2)
if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok {
container.HostConfig.StorageOpt[opt[0]] = opt[1]
}
}
}
// Set RWLayer for container after mount labels have been set
rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMappings))
if err != nil {
return nil, errdefs.System(err)
}
container.RWLayer = rwLayer
rootIDs := daemon.idMappings.RootPair()
if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
return nil, err
}
if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil {
return nil, err
}
if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
return nil, err
}
if err := daemon.createContainerOSSpecificSettings(container, params.Config, params.HostConfig); err != nil {
return nil, err
}
var endpointsConfigs map[string]*networktypes.EndpointSettings
if params.NetworkingConfig != nil {
endpointsConfigs = params.NetworkingConfig.EndpointsConfig
}
// Make sure NetworkMode has an acceptable value. We do this to ensure
// backwards API compatibility.
runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
daemon.updateContainerNetworkSettings(container, endpointsConfigs)
if err := daemon.Register(container); err != nil {
return nil, err
}
stateCtr.set(container.ID, "stopped")
daemon.LogContainerEvent(container, "create")
return container, nil
}
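// toHostConfigSelinuxLabels prefixes each SELinux label with "label=" so the
// labels can be used as HostConfig security options.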
func toHostConfigSelinuxLabels(labels []string) []string {
for i, l := range labels {
labels[i] = "label=" + l
}
return labels
}
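// generateSecurityOpt derives SELinux security options when the container shares
// IPC or PID namespaces with another container; labeling is disabled for host
// namespaces and privileged containers, and explicit label options from the caller
// take precedence.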
func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) {
for _, opt := range hostConfig.SecurityOpt {
con := strings.Split(opt, "=")
if con[0] == "label" {
// Caller overrode SecurityOpts
return nil, nil
}
}
ipcMode := hostConfig.IpcMode
pidMode := hostConfig.PidMode
privileged := hostConfig.Privileged
if ipcMode.IsHost() || pidMode.IsHost() || privileged {
return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil
}
var ipcLabel []string
var pidLabel []string
ipcContainer := ipcMode.Container()
pidContainer := pidMode.Container()
if ipcContainer != "" {
c, err := daemon.GetContainer(ipcContainer)
if err != nil {
return nil, err
}
ipcLabel = label.DupSecOpt(c.ProcessLabel)
if pidContainer == "" {
return toHostConfigSelinuxLabels(ipcLabel), err
}
}
if pidContainer != "" {
c, err := daemon.GetContainer(pidContainer)
if err != nil {
return nil, err
}
pidLabel = label.DupSecOpt(c.ProcessLabel)
if ipcContainer == "" {
return toHostConfigSelinuxLabels(pidLabel), err
}
}
if pidLabel != nil && ipcLabel != nil {
for i := 0; i < len(pidLabel); i++ {
if pidLabel[i] != ipcLabel[i] {
return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same")
}
}
return toHostConfigSelinuxLabels(pidLabel), nil
}
return nil, nil
}
// VolumeCreate creates a volume with the specified name, driver, and opts
// This is called directly from the Engine API
func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {
if name == "" {
name = stringid.GenerateNonCryptoID()
}
v, err := daemon.volumes.Create(name, driverName, opts, labels)
if err != nil {
return nil, err
}
daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()})
apiV := volumeToAPIType(v)
apiV.Mountpoint = v.Path()
return apiV, nil
}
func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error {
if img != nil && img.Config != nil {
if err := merge(config, img.Config); err != nil {
return err
}
}
// Reset the Entrypoint if it is [""]
if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" {
config.Entrypoint = nil
}
if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
return fmt.Errorf("No command specified")
}
return nil
}
// verifyNetworkingConfig checks that the client has not set configurations for more
// than one network when creating a container, and that any IPAMConfig is valid.
func verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error {
if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 {
return nil
}
if len(nwConfig.EndpointsConfig) == 1 {
for k, v := range nwConfig.EndpointsConfig {
if v == nil {
return errdefs.InvalidParameter(errors.Errorf("no EndpointSettings for %s", k))
}
if v.IPAMConfig != nil {
if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil {
return errors.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)
}
if v.IPAMConfig.IPv6Address != "" {
n := net.ParseIP(v.IPAMConfig.IPv6Address)
// if the address is an invalid network address (ParseIP == nil) or if it is
// an IPv4 address (To4() != nil), then it is an invalid IPv6 address
if n == nil || n.To4() != nil {
return errors.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)
}
}
}
}
return nil
}
l := make([]string, 0, len(nwConfig.EndpointsConfig))
for k := range nwConfig.EndpointsConfig {
l = append(l, k)
}
return errors.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", "))
}

View file

@ -0,0 +1,21 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"testing"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/errdefs"
"github.com/gotestyourself/gotestyourself/assert"
)
// Test case for 35752
func TestVerifyNetworkingConfig(t *testing.T) {
name := "mynet"
endpoints := make(map[string]*network.EndpointSettings, 1)
endpoints[name] = nil
nwConfig := &network.NetworkingConfig{
EndpointsConfig: endpoints,
}
err := verifyNetworkingConfig(nwConfig)
assert.Check(t, errdefs.IsInvalidParameter(err))
}

View file

@ -0,0 +1,81 @@
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"os"
"path/filepath"
containertypes "github.com/docker/docker/api/types/container"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/container"
"github.com/docker/docker/pkg/stringid"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
)
// createContainerOSSpecificSettings performs host-OS specific container create functionality
func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
if err := daemon.Mount(container); err != nil {
return err
}
defer daemon.Unmount(container)
rootIDs := daemon.idMappings.RootPair()
if err := container.SetupWorkingDirectory(rootIDs); err != nil {
return err
}
for spec := range config.Volumes {
name := stringid.GenerateNonCryptoID()
destination := filepath.Clean(spec)
// Skip volumes for which we already have something mounted on that
// destination because of a --volume-from.
if container.IsDestinationMounted(destination) {
continue
}
path, err := container.GetResourcePath(destination)
if err != nil {
return err
}
stat, err := os.Stat(path)
if err == nil && !stat.IsDir() {
return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
}
v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil)
if err != nil {
return err
}
if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
return err
}
container.AddMountPointWithVolume(destination, v, true)
}
return daemon.populateVolumes(container)
}
// populateVolumes copies data from the container's rootfs into the volume for non-binds.
// This is only called when the container is created.
func (daemon *Daemon) populateVolumes(c *container.Container) error {
for _, mnt := range c.MountPoints {
if mnt.Volume == nil {
continue
}
if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData {
continue
}
logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
return err
}
}
return nil
}

View file

@ -0,0 +1,91 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"runtime"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/volume"
)
// createContainerOSSpecificSettings performs host-OS specific container create functionality
func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
if container.OS == runtime.GOOS {
// Make sure the host config has the default daemon isolation if not specified by caller.
if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) {
hostConfig.Isolation = daemon.defaultIsolation
}
} else {
// LCOW must be a Hyper-V container as you can't run a shared kernel when one
// is a Windows kernel and the other is a Linux kernel.
if containertypes.Isolation.IsProcess(containertypes.Isolation(hostConfig.Isolation)) {
return fmt.Errorf("process isolation is invalid for Linux containers on Windows")
}
hostConfig.Isolation = "hyperv"
}
parser := volume.NewParser(container.OS)
for spec := range config.Volumes {
mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver)
if err != nil {
return fmt.Errorf("Unrecognised volume spec: %v", err)
}
// If the mountpoint doesn't have a name, generate one.
if len(mp.Name) == 0 {
mp.Name = stringid.GenerateNonCryptoID()
}
// Skip volumes for which we already have something mounted on that
// destination because of a --volume-from.
if container.IsDestinationMounted(mp.Destination) {
continue
}
volumeDriver := hostConfig.VolumeDriver
// Create the volume in the volume driver. If it doesn't exist,
// a new one will be created.
v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
if err != nil {
return err
}
// FIXME Windows: This code block is present in the Linux version and
// allows the contents to be copied to the container FS prior to it
// being started. However, the function utilizes the FollowSymLinkInScope
// path which does not cope with Windows volume-style file paths. There
// is a separate effort to resolve this (@swernli), so this processing
// is deferred for now. A case where this would be useful is when
// a dockerfile includes a VOLUME statement, but something is created
// in that directory during the dockerfile processing. What this means
// on Windows for TP5 is that in that scenario, the contents will not be
// copied, but that's (somewhat) OK as HCS will bomb out soon after
// as it doesn't support mapped directories which have contents in the
// destination path anyway.
//
// Example for repro later:
// FROM windowsservercore
// RUN mkdir c:\myvol
// RUN copy c:\windows\system32\ntdll.dll c:\myvol
// VOLUME "c:\myvol"
//
// Then
// docker build -t vol .
// docker run -it --rm vol cmd <-- This is where HCS will error out.
//
// // never attempt to copy existing content in a container FS to a shared volume
// if v.DriverName() == volume.DefaultDriverName {
// if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
// return err
// }
// }
// Add it to container.MountPoints
container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
}
return nil
}

File diff suppressed because it is too large

View file

@ -0,0 +1,127 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strings"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/mount"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// On Linux, plugins use a static path for storing execution state,
// instead of deriving path from daemon's exec-root. This is because
// plugin socket files are created here and they cannot exceed the max
// path length of 108 bytes.
func getPluginExecRoot(root string) string {
return "/run/docker/plugins"
}
func (daemon *Daemon) cleanupMountsByID(id string) error {
logrus.Debugf("Cleaning up old mountid %s: start.", id)
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return err
}
defer f.Close()
return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount)
}
func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error {
if daemon.root == "" {
return nil
}
var errors []string
regexps := getCleanPatterns(id)
sc := bufio.NewScanner(reader)
for sc.Scan() {
if fields := strings.Fields(sc.Text()); len(fields) >= 4 {
if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) {
for _, p := range regexps {
if p.MatchString(mnt) {
if err := unmount(mnt); err != nil {
logrus.Error(err)
errors = append(errors, err.Error())
}
}
}
}
}
}
if err := sc.Err(); err != nil {
return err
}
if len(errors) > 0 {
return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
}
logrus.Debugf("Cleaning up old mountid %v: done.", id)
return nil
}
// cleanupMounts unmounts mounts used by container resources and the daemon root mount
func (daemon *Daemon) cleanupMounts() error {
if err := daemon.cleanupMountsByID(""); err != nil {
return err
}
infos, err := mount.GetMounts()
if err != nil {
return errors.Wrap(err, "error reading mount table for cleanup")
}
info := getMountInfo(infos, daemon.root)
// `info.Root` here is the root mountpoint of the passed in path (`daemon.root`).
// The only case that needs to be cleaned up is when the daemon has performed a
// `mount --bind /daemon/root /daemon/root && mount --make-shared /daemon/root`
// This is only done when the daemon is started up and `/daemon/root` is not
// already on a shared mountpoint.
if !shouldUnmountRoot(daemon.root, info) {
return nil
}
logrus.WithField("mountpoint", daemon.root).Debug("unmounting daemon root")
return mount.Unmount(daemon.root)
}
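// getCleanPatterns builds the regular expressions used to match mountpoints
// (shm, aufs, overlay, zfs) that should be unmounted for the given container id;
// an empty id matches any container.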
func getCleanPatterns(id string) (regexps []*regexp.Regexp) {
var patterns []string
if id == "" {
id = "[0-9a-f]{64}"
patterns = append(patterns, "containers/"+id+"/shm")
}
patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$")
for _, p := range patterns {
r, err := regexp.Compile(p)
if err == nil {
regexps = append(regexps, r)
}
}
return
}
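// getRealPath resolves the given path by following any symlink to the underlying directory.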
func getRealPath(path string) (string, error) {
return fileutils.ReadSymlinkedDirectory(path)
}
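// shouldUnmountRoot reports whether the daemon root is a self-bind mount that was
// made shared at startup and should therefore be unmounted during cleanup.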
func shouldUnmountRoot(root string, info *mount.Info) bool {
if info == nil {
return false
}
if info.Mountpoint != root {
return false
}
if !strings.HasSuffix(root, info.Root) {
return false
}
return hasMountinfoOption(info.Optional, sharedPropagationOption)
}

View file

@ -0,0 +1,230 @@
// +build linux
package daemon // import "github.com/docker/docker/daemon"
import (
"strings"
"testing"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/mount"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio
143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755
145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset
150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu
151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct
152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices
154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer
155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio
156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event
157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb
158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
89 142 0:87 / /tmp rw,relatime - tmpfs none rw
97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw
100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered
115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio
116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio
242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw
120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio
171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio
310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw
`
func TestCleanupMounts(t *testing.T) {
d := &Daemon{
root: "/var/lib/docker/",
}
expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm"
var unmounted int
unmount := func(target string) error {
if target == expected {
unmounted++
}
return nil
}
d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount)
if unmounted != 1 {
t.Fatal("Expected to unmount the shm (and the shm only)")
}
}
func TestCleanupMountsByID(t *testing.T) {
d := &Daemon{
root: "/var/lib/docker/",
}
expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d"
var unmounted int
unmount := func(target string) error {
if target == expected {
unmounted++
}
return nil
}
d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount)
if unmounted != 1 {
t.Fatal("Expected to unmount the auf root (and that only)")
}
}
func TestNotCleanupMounts(t *testing.T) {
d := &Daemon{
repository: "",
}
var unmounted bool
unmount := func(target string) error {
unmounted = true
return nil
}
mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k`
d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount)
if unmounted {
t.Fatal("Expected not to clean up /dev/shm")
}
}
// TestTmpfsDevShmSizeOverride checks that a user-specified /dev/shm tmpfs mount
// size is not overridden by the default shmsize (which should only be used
// for the default /dev/shm, as in "shareable" and "private" ipc modes).
// https://github.com/moby/moby/issues/35271
func TestTmpfsDevShmSizeOverride(t *testing.T) {
size := "777m"
mnt := "/dev/shm"
d := Daemon{
idMappings: &idtools.IDMappings{},
}
c := &container.Container{
HostConfig: &containertypes.HostConfig{
ShmSize: 48 * 1024, // size we should NOT end up with
},
}
ms := []container.Mount{
{
Source: "tmpfs",
Destination: mnt,
Data: "size=" + size,
},
}
// convert ms to spec
spec := oci.DefaultSpec()
err := setMounts(&d, &spec, c, ms)
assert.Check(t, err)
// Check the resulting spec for the correct size
found := false
for _, m := range spec.Mounts {
if m.Destination == mnt {
for _, o := range m.Options {
if !strings.HasPrefix(o, "size=") {
continue
}
t.Logf("%+v\n", m.Options)
assert.Check(t, is.Equal("size="+size, o))
found = true
}
}
}
if !found {
t.Fatal("/dev/shm not found in spec, or size option missing")
}
}
func TestValidateContainerIsolationLinux(t *testing.T) {
d := Daemon{}
_, err := d.verifyContainerSettings("linux", &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux"))
}
func TestShouldUnmountRoot(t *testing.T) {
for _, test := range []struct {
desc string
root string
info *mount.Info
expect bool
}{
{
desc: "root is at /",
root: "/docker",
info: &mount.Info{Root: "/docker", Mountpoint: "/docker"},
expect: true,
},
{
desc: "not a mountpoint",
root: "/docker",
info: nil,
expect: false,
},
{
desc: "root is in a submount from `/`",
root: "/foo/docker",
info: &mount.Info{Root: "/docker", Mountpoint: "/foo/docker"},
expect: true,
},
{
desc: "root is mounted in from a parent mount namespace same root dir", // dind is an example of this
root: "/docker",
info: &mount.Info{Root: "/docker/volumes/1234657/_data", Mountpoint: "/docker"},
expect: false,
},
{
desc: "root is mounted in from a parent mount namespace different root dir",
root: "/foo/bar",
info: &mount.Info{Root: "/docker/volumes/1234657/_data", Mountpoint: "/foo/bar"},
expect: false,
},
} {
t.Run(test.desc, func(t *testing.T) {
for _, options := range []struct {
desc string
Optional string
expect bool
}{
{desc: "shared", Optional: "shared:", expect: true},
{desc: "slave", Optional: "slave:", expect: false},
{desc: "private", Optional: "private:", expect: false},
} {
t.Run(options.desc, func(t *testing.T) {
expect := options.expect
if expect {
expect = test.expect
}
if test.info != nil {
test.info.Optional = options.Optional
}
assert.Check(t, is.Equal(expect, shouldUnmountRoot(test.root, test.info)))
})
}
})
}
}

View file

@ -0,0 +1,326 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
_ "github.com/docker/docker/pkg/discovery/memory"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/volume"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/go-connections/nat"
"github.com/docker/libnetwork"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
"github.com/pkg/errors"
)
//
// https://github.com/docker/docker/issues/8069
//
func TestGetContainer(t *testing.T) {
c1 := &container.Container{
ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
Name: "tender_bardeen",
}
c2 := &container.Container{
ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
Name: "drunk_hawking",
}
c3 := &container.Container{
ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
Name: "3cdbd1aa",
}
c4 := &container.Container{
ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
}
c5 := &container.Container{
ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
Name: "d22d69a2b896",
}
store := container.NewMemoryStore()
store.Add(c1.ID, c1)
store.Add(c2.ID, c2)
store.Add(c3.ID, c3)
store.Add(c4.ID, c4)
store.Add(c5.ID, c5)
index := truncindex.NewTruncIndex([]string{})
index.Add(c1.ID)
index.Add(c2.ID)
index.Add(c3.ID)
index.Add(c4.ID)
index.Add(c5.ID)
containersReplica, err := container.NewViewDB()
if err != nil {
t.Fatalf("could not create ViewDB: %v", err)
}
daemon := &Daemon{
containers: store,
containersReplica: containersReplica,
idIndex: index,
}
daemon.reserveName(c1.ID, c1.Name)
daemon.reserveName(c2.ID, c2.Name)
daemon.reserveName(c3.ID, c3.Name)
daemon.reserveName(c4.ID, c4.Name)
daemon.reserveName(c5.ID, c5.Name)
if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
t.Fatal("Should explicitly match full container IDs")
}
if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 {
t.Fatal("Should match a partial ID")
}
if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 {
t.Fatal("Should match a full name")
}
// c3.Name is a partial match for both c3.ID and c2.ID
if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 {
t.Fatal("Should match a full name even though it collides with another container's ID")
}
if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 {
t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID")
}
if _, err := daemon.GetContainer("3cdbd1"); err == nil {
t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
}
if _, err := daemon.GetContainer("nothing"); err == nil {
t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
}
}
func initDaemonWithVolumeStore(tmp string) (*Daemon, error) {
var err error
daemon := &Daemon{
repository: tmp,
root: tmp,
}
daemon.volumes, err = store.New(tmp)
if err != nil {
return nil, err
}
volumesDriver, err := local.New(tmp, idtools.IDPair{UID: 0, GID: 0})
if err != nil {
return nil, err
}
volumedrivers.Register(volumesDriver, volumesDriver.Name())
return daemon, nil
}
func TestValidContainerNames(t *testing.T) {
invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
validNames := []string{"word-word", "word_word", "1weoid"}
for _, name := range invalidNames {
if validContainerNamePattern.MatchString(name) {
t.Fatalf("%q is not a valid container name and was returned as valid.", name)
}
}
for _, name := range validNames {
if !validContainerNamePattern.MatchString(name) {
t.Fatalf("%q is a valid container name and was returned as invalid.", name)
}
}
}
func TestContainerInitDNS(t *testing.T) {
tmp, err := ioutil.TempDir("", "docker-container-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
containerPath := filepath.Join(tmp, containerID)
if err := os.MkdirAll(containerPath, 0755); err != nil {
t.Fatal(err)
}
config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}},
"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
// Container struct only used to retrieve path to config file
container := &container.Container{Root: containerPath}
configPath, err := container.ConfigPath()
if err != nil {
t.Fatal(err)
}
if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil {
t.Fatal(err)
}
hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
hostConfigPath, err := container.HostConfigPath()
if err != nil {
t.Fatal(err)
}
if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil {
t.Fatal(err)
}
daemon, err := initDaemonWithVolumeStore(tmp)
if err != nil {
t.Fatal(err)
}
defer volumedrivers.Unregister(volume.DefaultDriverName)
c, err := daemon.load(containerID)
if err != nil {
t.Fatal(err)
}
if c.HostConfig.DNS == nil {
t.Fatal("Expected container DNS to not be nil")
}
if c.HostConfig.DNSSearch == nil {
t.Fatal("Expected container DNSSearch to not be nil")
}
if c.HostConfig.DNSOptions == nil {
t.Fatal("Expected container DNSOptions to not be nil")
}
}
func newPortNoError(proto, port string) nat.Port {
p, _ := nat.NewPort(proto, port)
return p
}
func TestMerge(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
portsImage := make(nat.PortSet)
portsImage[newPortNoError("tcp", "1111")] = struct{}{}
portsImage[newPortNoError("tcp", "2222")] = struct{}{}
configImage := &containertypes.Config{
ExposedPorts: portsImage,
Env: []string{"VAR1=1", "VAR2=2"},
Volumes: volumesImage,
}
portsUser := make(nat.PortSet)
portsUser[newPortNoError("tcp", "2222")] = struct{}{}
portsUser[newPortNoError("tcp", "3333")] = struct{}{}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &containertypes.Config{
ExposedPorts: portsUser,
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := merge(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
ports, _, err := nat.ParsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &containertypes.Config{
ExposedPorts: ports,
}
if err := merge(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs)
}
}
}
func TestValidateContainerIsolation(t *testing.T) {
d := Daemon{}
_, err := d.verifyContainerSettings(runtime.GOOS, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS))
}
func TestFindNetworkErrorType(t *testing.T) {
d := Daemon{}
_, err := d.FindNetwork("fakeNet")
_, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork)
if !errdefs.IsNotFound(err) || !ok {
t.Error("The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork")
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,357 @@
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"testing"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/gotestyourself/gotestyourself/assert"
)
type fakeContainerGetter struct {
containers map[string]*container.Container
}
func (f *fakeContainerGetter) GetContainer(cid string) (*container.Container, error) {
container, ok := f.containers[cid]
if !ok {
return nil, errors.New("container not found")
}
return container, nil
}
// Unix test, as it uses settings which are not available on Windows
func TestAdjustSharedNamespaceContainerName(t *testing.T) {
fakeID := "abcdef1234567890"
hostConfig := &containertypes.HostConfig{
IpcMode: containertypes.IpcMode("container:base"),
PidMode: containertypes.PidMode("container:base"),
NetworkMode: containertypes.NetworkMode("container:base"),
}
containerStore := &fakeContainerGetter{}
containerStore.containers = make(map[string]*container.Container)
containerStore.containers["base"] = &container.Container{
ID: fakeID,
}
adaptSharedNamespaceContainer(containerStore, hostConfig)
if hostConfig.IpcMode != containertypes.IpcMode("container:"+fakeID) {
t.Errorf("Expected IpcMode to be container:%s", fakeID)
}
if hostConfig.PidMode != containertypes.PidMode("container:"+fakeID) {
t.Errorf("Expected PidMode to be container:%s", fakeID)
}
if hostConfig.NetworkMode != containertypes.NetworkMode("container:"+fakeID) {
t.Errorf("Expected NetworkMode to be container:%s", fakeID)
}
}
// Unix test, as it uses settings which are not available on Windows
func TestAdjustCPUShares(t *testing.T) {
tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
daemon := &Daemon{
repository: tmp,
root: tmp,
}
hostConfig := &containertypes.HostConfig{
Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
}
daemon.adaptContainerSettings(hostConfig, true)
if hostConfig.CPUShares != linuxMinCPUShares {
t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares)
}
hostConfig.CPUShares = linuxMaxCPUShares + 1
daemon.adaptContainerSettings(hostConfig, true)
if hostConfig.CPUShares != linuxMaxCPUShares {
t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares)
}
hostConfig.CPUShares = 0
daemon.adaptContainerSettings(hostConfig, true)
if hostConfig.CPUShares != 0 {
t.Error("Expected CPUShares to be unchanged")
}
hostConfig.CPUShares = 1024
daemon.adaptContainerSettings(hostConfig, true)
if hostConfig.CPUShares != 1024 {
t.Error("Expected CPUShares to be unchanged")
}
}
// Unix test, as it uses settings which are not available on Windows
func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
daemon := &Daemon{
repository: tmp,
root: tmp,
}
hostConfig := &containertypes.HostConfig{
Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
}
daemon.adaptContainerSettings(hostConfig, false)
if hostConfig.CPUShares != linuxMinCPUShares-1 {
t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1)
}
hostConfig.CPUShares = linuxMaxCPUShares + 1
daemon.adaptContainerSettings(hostConfig, false)
if hostConfig.CPUShares != linuxMaxCPUShares+1 {
t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1)
}
hostConfig.CPUShares = 0
daemon.adaptContainerSettings(hostConfig, false)
if hostConfig.CPUShares != 0 {
t.Error("Expected CPUShares to be unchanged")
}
hostConfig.CPUShares = 1024
daemon.adaptContainerSettings(hostConfig, false)
if hostConfig.CPUShares != 1024 {
t.Error("Expected CPUShares to be unchanged")
}
}
// Unix test, as it uses settings which are not available on Windows
func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
container := &container.Container{}
config := &containertypes.HostConfig{}
// test apparmor
config.SecurityOpt = []string{"apparmor=test_profile"}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
if container.AppArmorProfile != "test_profile" {
t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
}
// test seccomp
sp := "/path/to/seccomp_test.json"
config.SecurityOpt = []string{"seccomp=" + sp}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
if container.SeccompProfile != sp {
t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile)
}
// test valid label
config.SecurityOpt = []string{"label=user:USER"}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
// test invalid label
config.SecurityOpt = []string{"label"}
if err := parseSecurityOpt(container, config); err == nil {
t.Fatal("Expected parseSecurityOpt error, got nil")
}
// test invalid opt
config.SecurityOpt = []string{"test"}
if err := parseSecurityOpt(container, config); err == nil {
t.Fatal("Expected parseSecurityOpt error, got nil")
}
}
func TestParseSecurityOpt(t *testing.T) {
container := &container.Container{}
config := &containertypes.HostConfig{}
// test apparmor
config.SecurityOpt = []string{"apparmor=test_profile"}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
if container.AppArmorProfile != "test_profile" {
t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
}
// test seccomp
sp := "/path/to/seccomp_test.json"
config.SecurityOpt = []string{"seccomp=" + sp}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
if container.SeccompProfile != sp {
t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
}
// test valid label
config.SecurityOpt = []string{"label=user:USER"}
if err := parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
}
// test invalid label
config.SecurityOpt = []string{"label"}
if err := parseSecurityOpt(container, config); err == nil {
t.Fatal("Expected parseSecurityOpt error, got nil")
}
// test invalid opt
config.SecurityOpt = []string{"test"}
if err := parseSecurityOpt(container, config); err == nil {
t.Fatal("Expected parseSecurityOpt error, got nil")
}
}
func TestParseNNPSecurityOptions(t *testing.T) {
daemon := &Daemon{
configStore: &config.Config{NoNewPrivileges: true},
}
container := &container.Container{}
config := &containertypes.HostConfig{}
// test NNP when "daemon:true" and "no-new-privileges=false""
config.SecurityOpt = []string{"no-new-privileges=false"}
if err := daemon.parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
}
if container.NoNewPrivileges {
t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges)
}
// test NNP when "daemon:false" and "no-new-privileges=true""
daemon.configStore.NoNewPrivileges = false
config.SecurityOpt = []string{"no-new-privileges=true"}
if err := daemon.parseSecurityOpt(container, config); err != nil {
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
}
if !container.NoNewPrivileges {
t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges)
}
}
func TestNetworkOptions(t *testing.T) {
daemon := &Daemon{}
dconfigCorrect := &config.Config{
CommonConfig: config.CommonConfig{
ClusterStore: "consul://localhost:8500",
ClusterAdvertise: "192.168.0.1:8000",
},
}
if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil {
t.Fatalf("Expect networkOptions success, got error: %v", err)
}
dconfigWrong := &config.Config{
CommonConfig: config.CommonConfig{
ClusterStore: "consul://localhost:8500://test://bbb",
},
}
if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil {
t.Fatal("Expected networkOptions error, got nil")
}
}
func TestMigratePre17Volumes(t *testing.T) {
rootDir, err := ioutil.TempDir("", "test-daemon-volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootDir)
volumeRoot := filepath.Join(rootDir, "volumes")
err = os.MkdirAll(volumeRoot, 0755)
if err != nil {
t.Fatal(err)
}
containerRoot := filepath.Join(rootDir, "containers")
cid := "1234"
err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755)
assert.NilError(t, err)
vid := "5678"
vfsPath := filepath.Join(rootDir, "vfs", "dir", vid)
err = os.MkdirAll(vfsPath, 0755)
assert.NilError(t, err)
config := []byte(`
{
"ID": "` + cid + `",
"Volumes": {
"/foo": "` + vfsPath + `",
"/bar": "/foo",
"/quux": "/quux"
},
"VolumesRW": {
"/foo": true,
"/bar": true,
"/quux": false
}
}
`)
volStore, err := store.New(volumeRoot)
if err != nil {
t.Fatal(err)
}
drv, err := local.New(volumeRoot, idtools.IDPair{UID: 0, GID: 0})
if err != nil {
t.Fatal(err)
}
volumedrivers.Register(drv, volume.DefaultDriverName)
daemon := &Daemon{
root: rootDir,
repository: containerRoot,
volumes: volStore,
}
err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 0600)
if err != nil {
t.Fatal(err)
}
c, err := daemon.load(cid)
if err != nil {
t.Fatal(err)
}
if err := daemon.verifyVolumesInfo(c); err != nil {
t.Fatal(err)
}
expected := map[string]volume.MountPoint{
"/foo": {Destination: "/foo", RW: true, Name: vid},
"/bar": {Source: "/foo", Destination: "/bar", RW: true},
"/quux": {Source: "/quux", Destination: "/quux", RW: false},
}
for id, mp := range c.MountPoints {
x, exists := expected[id]
if !exists {
t.Fatal("volume not migrated")
}
if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name {
t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp)
}
}
}

View file

@ -0,0 +1,5 @@
// +build !linux,!freebsd,!windows
package daemon // import "github.com/docker/docker/daemon"
const platformSupported = false

View file

@ -0,0 +1,662 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/datastore"
winlibnetwork "github.com/docker/libnetwork/drivers/windows"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc/mgr"
)
const (
defaultNetworkSpace = "172.16.0.0/12"
platformSupported = true
windowsMinCPUShares = 1
windowsMaxCPUShares = 10000
windowsMinCPUPercent = 1
windowsMaxCPUPercent = 100
)
// Windows has no concept of an execution state directory. So use config.Root here.
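// For example (illustrative), with the default Windows data root of
// C:\ProgramData\docker the plugin exec root resolves to C:\ProgramData\docker\plugins.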
func getPluginExecRoot(root string) string {
return filepath.Join(root, "plugins")
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
return parseSecurityOpt(container, hostConfig)
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
return nil
}
func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error {
return nil
}
func checkKernel() error {
return nil
}
func (daemon *Daemon) getCgroupDriver() string {
return ""
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if hostConfig == nil {
return nil
}
return nil
}
func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) {
warnings := []string{}
fixMemorySwappiness(resources)
if !isHyperv {
// The processor resource controls are mutually exclusive on
// Windows Server Containers, the order of precedence is
// CPUCount first, then CPUShares, and CPUPercent last.
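// For example, a request with CPUCount=2 and CPUShares=512 keeps the count,
// discards the shares and surfaces a warning to the client.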
if resources.CPUCount > 0 {
if resources.CPUShares > 0 {
warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded")
logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded")
resources.CPUShares = 0
}
if resources.CPUPercent > 0 {
warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
resources.CPUPercent = 0
}
} else if resources.CPUShares > 0 {
if resources.CPUPercent > 0 {
warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
resources.CPUPercent = 0
}
}
}
if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares {
return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares)
}
if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent {
return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent)
}
if resources.CPUCount < 0 {
return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative")
}
if resources.NanoCPUs > 0 && resources.CPUPercent > 0 {
return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUShares > 0 {
return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set")
}
// The precision we could get is 0.01, because on Windows we have to convert to CPUPercent.
// We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error.
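// Worked example (illustrative): `--cpus=1.5` maps to NanoCPUs=1500000000; on a
// host where sysinfo.NumCPU() reports 4 CPUs, the accepted range here is 0 to 4e9.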
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
osv := system.GetOSVersion()
if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 {
leftoverNanoCPUs := resources.NanoCPUs % 1e9
if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 {
resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9
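// e.g. a request for 2.5 CPUs (NanoCPUs=2500000000) is rounded to the nearest
// whole CPU: (2500000000 + 500000000) / 1e9 * 1e9 = 3000000000.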
warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs)
warnings = append(warnings, warningString)
logrus.Warn(warningString)
}
}
if len(resources.BlkioDeviceReadBps) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps")
}
if len(resources.BlkioDeviceReadIOps) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps")
}
if len(resources.BlkioDeviceWriteBps) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps")
}
if len(resources.BlkioDeviceWriteIOps) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps")
}
if resources.BlkioWeight > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight")
}
if len(resources.BlkioWeightDevice) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice")
}
if resources.CgroupParent != "" {
return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent")
}
if resources.CPUPeriod != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod")
}
if resources.CpusetCpus != "" {
return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus")
}
if resources.CpusetMems != "" {
return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems")
}
if resources.KernelMemory != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory")
}
if resources.MemoryReservation != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation")
}
if resources.MemorySwap != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap")
}
if resources.MemorySwappiness != nil {
return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness")
}
if resources.OomKillDisable != nil && *resources.OomKillDisable {
return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable")
}
if resources.PidsLimit != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit")
}
if len(resources.Ulimits) != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits")
}
return warnings, nil
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
warnings := []string{}
hyperv := daemon.runAsHyperVContainer(hostConfig)
if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() {
// @engine maintainers. This block should not be removed. It partially enforces licensing
// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers")
}
w, err := verifyContainerResources(&hostConfig.Resources, hyperv)
warnings = append(warnings, w...)
return warnings, err
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(config *config.Config) error {
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
// Validate the OS version. Note that docker.exe must be manifested for this
// call to return the correct version.
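// For example (a known Windows behaviour), a binary without a compatibility
// manifest may be reported as version 6.2 and fail this check even on a supported host.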
osv := system.GetOSVersion()
if osv.MajorVersion < 10 {
return fmt.Errorf("This version of Windows does not support the docker daemon")
}
if osv.Build < 14393 {
return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10")
}
vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
if vmcompute.Load() != nil {
return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed")
}
// Ensure that the required Host Network Service and vmcompute services
// are running. Docker will fail in unexpected ways if these are not present.
var requiredServices = []string{"hns", "vmcompute"}
if err := ensureServicesInstalled(requiredServices); err != nil {
return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed")
}
return nil
}
func ensureServicesInstalled(services []string) error {
m, err := mgr.Connect()
if err != nil {
return err
}
defer m.Disconnect()
for _, service := range services {
s, err := m.OpenService(service)
if err != nil {
return errors.Wrapf(err, "failed to open service %s", service)
}
s.Close()
}
return nil
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
return nil
}
// configureMaxThreads sets the Go runtime max threads threshold
func configureMaxThreads(config *config.Config) error {
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, nil, nil)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "")
if err != nil {
return nil, err
}
// Remove networks not present in HNS
for _, v := range controller.Networks() {
options := v.Info().DriverOptions()
hnsid := options[winlibnetwork.HNSID]
found := false
for _, v := range hnsresponse {
if v.Id == hnsid {
found = true
break
}
}
if !found {
// global networks should not be deleted by local HNS
if v.Info().Scope() != datastore.GlobalScope {
err = v.Delete()
if err != nil {
logrus.Errorf("Error occurred when removing network %v", err)
}
}
}
}
_, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false))
if err != nil {
return nil, err
}
defaultNetworkExists := false
if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil {
options := network.Info().DriverOptions()
for _, v := range hnsresponse {
if options[winlibnetwork.HNSID] == v.Id {
defaultNetworkExists = true
break
}
}
}
// Discover and add HNS networks to Windows.
// Networks that already exist are removed and added again.
for _, v := range hnsresponse {
if strings.ToLower(v.Type) == "private" {
continue // workaround for HNS reporting unsupported networks
}
var n libnetwork.Network
s := func(current libnetwork.Network) bool {
options := current.Info().DriverOptions()
if options[winlibnetwork.HNSID] == v.Id {
n = current
return true
}
return false
}
controller.WalkNetworks(s)
drvOptions := make(map[string]string)
if n != nil {
// global networks should not be deleted by local HNS
if n.Info().Scope() == datastore.GlobalScope {
continue
}
v.Name = n.Name()
// This will not cause a network delete from HNS as the network
// is not yet populated in the libnetwork windows driver.
// Restore driver options if they existed before.
drvOptions = n.Info().DriverOptions()
n.Delete()
}
netOption := map[string]string{
winlibnetwork.NetworkName: v.Name,
winlibnetwork.HNSID: v.Id,
}
// add persisted driver options
for k, v := range drvOptions {
if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID {
netOption[k] = v
}
}
v4Conf := []*libnetwork.IpamConf{}
for _, subnet := range v.Subnets {
ipamV4Conf := libnetwork.IpamConf{}
ipamV4Conf.PreferredPool = subnet.AddressPrefix
ipamV4Conf.Gateway = subnet.GatewayAddress
v4Conf = append(v4Conf, &ipamV4Conf)
}
name := v.Name
// If there is no default NAT network yet, create one from the first NAT
// network encountered that does not already exist in libnetwork.
if !defaultNetworkExists &&
runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) &&
n == nil {
name = runconfig.DefaultDaemonNetworkMode().NetworkName()
defaultNetworkExists = true
}
v6Conf := []*libnetwork.IpamConf{}
_, err := controller.NewNetwork(strings.ToLower(v.Type), name, "",
libnetwork.NetworkOptionGeneric(options.Generic{
netlabel.GenericData: netOption,
}),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
)
if err != nil {
logrus.Errorf("Error occurred when creating network %v", err)
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
}
return controller, nil
}
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil {
return nil
}
netOption := map[string]string{
winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(),
}
var ipamOption libnetwork.NetworkOption
var subnetPrefix string
if config.BridgeConfig.FixedCIDR != "" {
subnetPrefix = config.BridgeConfig.FixedCIDR
} else {
// TP5 doesn't properly support detecting the subnet
osv := system.GetOSVersion()
if osv.Build < 14360 {
subnetPrefix = defaultNetworkSpace
}
}
if subnetPrefix != "" {
ipamV4Conf := libnetwork.IpamConf{}
ipamV4Conf.PreferredPool = subnetPrefix
v4Conf := []*libnetwork.IpamConf{&ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil)
}
_, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "",
libnetwork.NetworkOptionGeneric(options.Generic{
netlabel.GenericData: netOption,
}),
ipamOption,
)
if err != nil {
return fmt.Errorf("Error creating default network: %v", err)
}
return nil
}
// registerLinks sets up links between containers and writes the
// configuration out for persistence. As of Windows TP4, links are not supported.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
return nil
}
func (daemon *Daemon) cleanupMountsByID(in string) error {
return nil
}
func (daemon *Daemon) cleanupMounts() error {
return nil
}
func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
return &idtools.IDMappings{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
config.Root = rootDir
// Create the root directory if it doesn't exist
if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil {
return err
}
return nil
}
// runAsHyperVContainer returns true if we are going to run as a Hyper-V container
func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool {
if hostConfig.Isolation.IsDefault() {
// Container is set to use the default, so take the default from the daemon configuration
return daemon.defaultIsolation.IsHyperV()
}
// Container is requesting an isolation mode. Honour it.
return hostConfig.Isolation.IsHyperV()
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
// Bail out now for Linux containers. We cannot mount the container's filesystem on the
// host as it is a non-Windows filesystem.
if system.LCOWSupported() && container.OS != "windows" {
return nil
}
// We do not mount if this is a Hyper-V container, as it needs to be mounted inside the
// utility VM, not on the host.
if !daemon.runAsHyperVContainer(container.HostConfig) {
return daemon.Mount(container)
}
return nil
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
// Bail out now for Linux containers
if system.LCOWSupported() && container.OS != "windows" {
return nil
}
// We do not unmount if this is a Hyper-V container
if !daemon.runAsHyperVContainer(container.HostConfig) {
return daemon.Unmount(container)
}
return nil
}
func driverOptions(config *config.Config) []nwconfig.Option {
return []nwconfig.Option{}
}
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
// Obtain the stats from HCS via libcontainerd
stats, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
// Start with an empty structure
s := &types.StatsJSON{}
s.Stats.Read = stats.Read
s.Stats.NumProcs = platform.NumProcs()
if stats.HCSStats != nil {
hcss := stats.HCSStats
// Populate the CPU/processor statistics
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: hcss.Processor.TotalRuntime100ns,
UsageInKernelmode: hcss.Processor.RuntimeKernel100ns,
UsageInUsermode: hcss.Processor.RuntimeUser100ns,
},
}
// Populate the memory statistics
s.MemoryStats = types.MemoryStats{
Commit: hcss.Memory.UsageCommitBytes,
CommitPeak: hcss.Memory.UsageCommitPeakBytes,
PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes,
}
// Populate the storage statistics
s.StorageStats = types.StorageStats{
ReadCountNormalized: hcss.Storage.ReadCountNormalized,
ReadSizeBytes: hcss.Storage.ReadSizeBytes,
WriteCountNormalized: hcss.Storage.WriteCountNormalized,
WriteSizeBytes: hcss.Storage.WriteSizeBytes,
}
// Populate the network statistics
s.Networks = make(map[string]types.NetworkStats)
for _, nstats := range hcss.Network {
s.Networks[nstats.EndpointId] = types.NetworkStats{
RxBytes: nstats.BytesReceived,
RxPackets: nstats.PacketsReceived,
RxDropped: nstats.DroppedPacketsIncoming,
TxBytes: nstats.BytesSent,
TxPackets: nstats.PacketsSent,
TxDropped: nstats.DroppedPacketsOutgoing,
}
}
}
return s, nil
}
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows.
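// The default can be overridden with a daemon exec-opt, for example:
//   dockerd --exec-opt isolation=hyperv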
func (daemon *Daemon) setDefaultIsolation() error {
daemon.defaultIsolation = containertypes.Isolation("process")
// On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU
// but it should not be treated as such.
if system.IsWindowsClient() && !system.IsIoTCore() {
daemon.defaultIsolation = containertypes.Isolation("hyperv")
}
for _, option := range daemon.configStore.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return err
}
key = strings.ToLower(key)
switch key {
case "isolation":
if !containertypes.Isolation(val).IsValid() {
return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val)
}
if containertypes.Isolation(val).IsHyperV() {
daemon.defaultIsolation = containertypes.Isolation("hyperv")
}
if containertypes.Isolation(val).IsProcess() {
if system.IsWindowsClient() && !system.IsIoTCore() {
// @engine maintainers. This block should not be removed. It partially enforces licensing
// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
return fmt.Errorf("Windows client operating systems only support Hyper-V containers")
}
daemon.defaultIsolation = containertypes.Isolation("process")
}
default:
return fmt.Errorf("Unrecognised exec-opt '%s'\n", key)
}
}
logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation)
return nil
}
func setupDaemonProcess(config *config.Config) error {
return nil
}
// verifyVolumesInfo is a no-op on Windows.
// It is called during daemon initialization to migrate volumes from pre-1.7;
// volumes were not supported on Windows pre-1.7.
func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
return nil
}
func (daemon *Daemon) setupSeccompProfile() error {
return nil
}
func getRealPath(path string) (string, error) {
if system.IsIoTCore() {
// Due to https://github.com/golang/go/issues/20506, path expansion
// does not work correctly on the default IoT Core configuration.
// TODO @darrenstahlmsft remove this once golang/go/20506 is fixed
return path, nil
}
return fileutils.ReadSymlinkedDirectory(path)
}
func (daemon *Daemon) loadRuntimes() error {
return nil
}
func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error {
return nil
}

View file

@ -0,0 +1,72 @@
// +build windows
package daemon // import "github.com/docker/docker/daemon"
import (
"strings"
"testing"
"golang.org/x/sys/windows/svc/mgr"
)
const existingService = "Power"
func TestEnsureServicesExist(t *testing.T) {
m, err := mgr.Connect()
if err != nil {
t.Fatal("failed to connect to service manager, this test needs admin")
}
defer m.Disconnect()
s, err := m.OpenService(existingService)
if err != nil {
t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService)
}
defer s.Close()
input := []string{existingService}
err = ensureServicesInstalled(input)
if err != nil {
t.Fatalf("unexpected error for input %q: %q", input, err)
}
}
func TestEnsureServicesExistErrors(t *testing.T) {
m, err := mgr.Connect()
if err != nil {
t.Fatal("failed to connect to service manager, this test needs admin")
}
defer m.Disconnect()
s, err := m.OpenService(existingService)
if err != nil {
t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService)
}
defer s.Close()
for _, testcase := range []struct {
input []string
expectedError string
}{
{
input: []string{"daemon_windows_test_fakeservice"},
expectedError: "failed to open service daemon_windows_test_fakeservice",
},
{
input: []string{"daemon_windows_test_fakeservice1", "daemon_windows_test_fakeservice2"},
expectedError: "failed to open service daemon_windows_test_fakeservice1",
},
{
input: []string{existingService, "daemon_windows_test_fakeservice"},
expectedError: "failed to open service daemon_windows_test_fakeservice",
},
} {
t.Run(strings.Join(testcase.input, ";"), func(t *testing.T) {
err := ensureServicesInstalled(testcase.input)
if err == nil {
t.Fatalf("expected error for input %v", testcase.input)
}
if !strings.Contains(err.Error(), testcase.expectedError) {
t.Fatalf("expected error %q to contain %q", err.Error(), testcase.expectedError)
}
})
}
}

View file

@ -0,0 +1,27 @@
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"os"
"os/signal"
stackdump "github.com/docker/docker/pkg/signal"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func (d *Daemon) setupDumpStackTrap(root string) {
c := make(chan os.Signal, 1)
signal.Notify(c, unix.SIGUSR1)
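// A stack dump can then be requested with, for example, `kill -USR1 $(pidof dockerd)`;
// the goroutine stacks are written to a file under the daemon root directory.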
go func() {
for range c {
path, err := stackdump.DumpStacks(root)
if err != nil {
logrus.WithError(err).Error("failed to write goroutines dump")
} else {
logrus.Infof("goroutine stacks written to %s", path)
}
}
}()
}

View file

@ -0,0 +1,7 @@
// +build !linux,!darwin,!freebsd,!windows
package daemon // import "github.com/docker/docker/daemon"
func (d *Daemon) setupDumpStackTrap(_ string) {
return
}

View file

@ -0,0 +1,46 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"os"
"unsafe"
winio "github.com/Microsoft/go-winio"
"github.com/docker/docker/pkg/signal"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
func (d *Daemon) setupDumpStackTrap(root string) {
// Windows does not support signals like *nix systems. So instead of
// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
// signaled. ACL'd to builtin administrators and local system
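// A dump can be requested by setting this named event from an elevated process,
// for example via the Win32 OpenEvent/SetEvent APIs (the exact client tooling is
// not prescribed here).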
event := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid())
ev, _ := windows.UTF16PtrFromString(event)
sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)")
if err != nil {
logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error())
return
}
var sa windows.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
h, err := windows.CreateEvent(&sa, 0, 0, ev)
if h == 0 || err != nil {
logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error())
return
}
go func() {
logrus.Debugf("Stackdump - waiting signal at %s", event)
for {
windows.WaitForSingleObject(h, windows.INFINITE)
path, err := signal.DumpStacks(root)
if err != nil {
logrus.WithError(err).Error("failed to write goroutines dump")
} else {
logrus.Infof("goroutine stacks written to %s", path)
}
}
}()
}

View file

@ -0,0 +1,186 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"os"
"path"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/volume"
volumestore "github.com/docker/docker/volume/store"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
start := time.Now()
container, err := daemon.GetContainer(name)
if err != nil {
return err
}
// Container state RemovalInProgress should be used to avoid races.
if inProgress := container.SetRemovalInProgress(); inProgress {
err := fmt.Errorf("removal of container %s is already in progress", name)
return errdefs.Conflict(err)
}
defer container.ResetRemovalInProgress()
// Check that the container wasn't deregistered by a previous rm since the Get call above.
if c := daemon.containers.Get(container.ID); c == nil {
return nil
}
if config.RemoveLink {
return daemon.rmLink(container, name)
}
err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume)
containerActions.WithValues("delete").UpdateSince(start)
return err
}
func (daemon *Daemon) rmLink(container *container.Container, name string) error {
if name[0] != '/' {
name = "/" + name
}
parent, n := path.Split(name)
if parent == "/" {
return fmt.Errorf("Conflict, cannot remove the default name of the container")
}
parent = strings.TrimSuffix(parent, "/")
pe, err := daemon.containersReplica.Snapshot().GetID(parent)
if err != nil {
return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
}
daemon.releaseName(name)
parentContainer, _ := daemon.GetContainer(pe)
if parentContainer != nil {
daemon.linkIndex.unlink(name, container, parentContainer)
if err := daemon.updateNetwork(parentContainer); err != nil {
logrus.Debugf("Could not update network to remove link %s: %v", n, err)
}
}
return nil
}
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) {
if container.IsRunning() {
if !forceRemove {
state := container.StateString()
procedure := "Stop the container before attempting removal or force remove"
if state == "paused" {
procedure = "Unpause and then " + strings.ToLower(procedure)
}
err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure)
return errdefs.Conflict(err)
}
if err := daemon.Kill(container); err != nil {
return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
}
}
if !system.IsOSSupported(container.OS) {
return fmt.Errorf("cannot remove %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem)
}
// Stop collection of stats for the container, regardless of
// whether stats are currently being collected.
daemon.statsCollector.StopCollection(container)
if err = daemon.containerStop(container, 3); err != nil {
return err
}
// Mark container dead. We don't want anybody to be restarting it.
container.Lock()
container.Dead = true
// Save container state to disk, so that if an error happens before the
// container meta file is removed from disk, a restart of docker does not
// bring a dead container back to life.
if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
logrus.Errorf("Error saving dying container to disk: %v", err)
}
container.Unlock()
// When container creation fails and `RWLayer` has not been created yet, we
// do not call `ReleaseRWLayer`
if container.RWLayer != nil {
err := daemon.imageService.ReleaseLayer(container.RWLayer, container.OS)
if err != nil {
err = errors.Wrapf(err, "container %s", container.ID)
container.SetRemovalError(err)
return err
}
container.RWLayer = nil
}
if err := system.EnsureRemoveAll(container.Root); err != nil {
e := errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
container.SetRemovalError(e)
return e
}
linkNames := daemon.linkIndex.delete(container)
selinuxFreeLxcContexts(container.ProcessLabel)
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
daemon.containersReplica.Delete(container)
if e := daemon.removeMountPoints(container, removeVolume); e != nil {
logrus.Error(e)
}
for _, name := range linkNames {
daemon.releaseName(name)
}
container.SetRemoved()
stateCtr.del(container.ID)
daemon.LogContainerEvent(container, "destroy")
return nil
}
// VolumeRm removes the volume with the given name.
// If the volume is referenced by a container, it is not removed.
// This is called directly from the Engine API.
func (daemon *Daemon) VolumeRm(name string, force bool) error {
v, err := daemon.volumes.Get(name)
if err != nil {
if force && volumestore.IsNotExist(err) {
return nil
}
return err
}
err = daemon.volumeRm(v)
if err != nil && volumestore.IsInUse(err) {
return errdefs.Conflict(err)
}
if err == nil || force {
daemon.volumes.Purge(name)
return nil
}
return err
}
func (daemon *Daemon) volumeRm(v volume.Volume) error {
if err := daemon.volumes.Remove(v); err != nil {
return errors.Wrap(err, "unable to remove volume")
}
daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()})
return nil
}

View file

@ -0,0 +1,96 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/internal/testutil"
"github.com/gotestyourself/gotestyourself/assert"
)
func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) {
tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
assert.NilError(t, err)
d := &Daemon{
repository: tmp,
root: tmp,
}
d.containers = container.NewMemoryStore()
return d, func() { os.RemoveAll(tmp) }
}
func newContainerWithState(state *container.State) *container.Container {
return &container.Container{
ID: "test",
State: state,
Config: &containertypes.Config{},
}
}
// TestContainerDelete tests that a useful error message and instructions are
// given when attempting to remove a container (#30842)
func TestContainerDelete(t *testing.T) {
tt := []struct {
errMsg string
fixMsg string
initContainer func() *container.Container
}{
// a paused container
{
errMsg: "cannot remove a paused container",
fixMsg: "Unpause and then stop the container before attempting removal or force remove",
initContainer: func() *container.Container {
return newContainerWithState(&container.State{Paused: true, Running: true})
}},
// a restarting container
{
errMsg: "cannot remove a restarting container",
fixMsg: "Stop the container before attempting removal or force remove",
initContainer: func() *container.Container {
c := newContainerWithState(container.NewState())
c.SetRunning(0, true)
c.SetRestarting(&container.ExitStatus{})
return c
}},
// a running container
{
errMsg: "cannot remove a running container",
fixMsg: "Stop the container before attempting removal or force remove",
initContainer: func() *container.Container {
return newContainerWithState(&container.State{Running: true})
}},
}
for _, te := range tt {
c := te.initContainer()
d, cleanup := newDaemonWithTmpRoot(t)
defer cleanup()
d.containers.Add(c.ID, c)
err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false})
testutil.ErrorContains(t, err, te.errMsg)
testutil.ErrorContains(t, err, te.fixMsg)
}
}
func TestContainerDoubleDelete(t *testing.T) {
c := newContainerWithState(container.NewState())
// Mark the container as having a delete in progress
c.SetRemovalInProgress()
d, cleanup := newDaemonWithTmpRoot(t)
defer cleanup()
d.containers.Add(c.ID, c)
// Try to remove the container when its state is removalInProgress.
// It should return an error indicating it is under removal progress.
err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true})
testutil.ErrorContains(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID))
}

View file

@ -0,0 +1,17 @@
package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/swarmkit/agent/exec"
)
// SetContainerDependencyStore sets the dependency store backend for the container
func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error {
c, err := daemon.GetContainer(name)
if err != nil {
return err
}
c.DependencyStore = store
return nil
}

View file

@ -0,0 +1,202 @@
package discovery // import "github.com/docker/docker/daemon/discovery"
import (
"errors"
"fmt"
"strconv"
"time"
"github.com/docker/docker/pkg/discovery"
"github.com/sirupsen/logrus"
// Register the libkv backends for discovery.
_ "github.com/docker/docker/pkg/discovery/kv"
)
const (
// defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval.
defaultDiscoveryHeartbeat = 20 * time.Second
// defaultDiscoveryTTLFactor is the default TTL factor for discovery
defaultDiscoveryTTLFactor = 3
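// With these defaults a node re-registers every 20 seconds and its registration
// expires after 3*20s = 60s if heartbeats stop arriving.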
)
// ErrDiscoveryDisabled is an error returned if the discovery is disabled
var ErrDiscoveryDisabled = errors.New("discovery is disabled")
// Reloader is the discovery reloader of the daemon
type Reloader interface {
discovery.Watcher
Stop()
Reload(backend, address string, clusterOpts map[string]string) error
ReadyCh() <-chan struct{}
}
type daemonDiscoveryReloader struct {
backend discovery.Backend
ticker *time.Ticker
term chan bool
readyCh chan struct{}
}
func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
return d.backend.Watch(stopCh)
}
func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} {
return d.readyCh
}
func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) {
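// Illustrative examples: {"discovery.heartbeat": "10", "discovery.ttl": "30"} yields
// heartbeat=10s and ttl=30s; "discovery.ttl": "30" alone yields heartbeat=30s/3=10s;
// "discovery.heartbeat": "20" alone yields ttl=3*20s=60s.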
var (
heartbeat = defaultDiscoveryHeartbeat
ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat
)
if hb, ok := clusterOpts["discovery.heartbeat"]; ok {
h, err := strconv.Atoi(hb)
if err != nil {
return time.Duration(0), time.Duration(0), err
}
if h <= 0 {
return time.Duration(0), time.Duration(0),
fmt.Errorf("discovery.heartbeat must be positive")
}
heartbeat = time.Duration(h) * time.Second
ttl = defaultDiscoveryTTLFactor * heartbeat
}
if tstr, ok := clusterOpts["discovery.ttl"]; ok {
t, err := strconv.Atoi(tstr)
if err != nil {
return time.Duration(0), time.Duration(0), err
}
if t <= 0 {
return time.Duration(0), time.Duration(0),
fmt.Errorf("discovery.ttl must be positive")
}
ttl = time.Duration(t) * time.Second
if _, ok := clusterOpts["discovery.heartbeat"]; !ok {
heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor)
}
if ttl <= heartbeat {
return time.Duration(0), time.Duration(0),
fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat")
}
}
return heartbeat, ttl, nil
}
// Init initializes the nodes discovery subsystem by connecting to the specified backend
// and starts a registration loop to advertise the current node under the specified address.
func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) {
heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
if err != nil {
return nil, err
}
reloader := &daemonDiscoveryReloader{
backend: backend,
ticker: time.NewTicker(heartbeat),
term: make(chan bool),
readyCh: make(chan struct{}),
}
// We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
// but we never actually Watch() for nodes appearing and disappearing for the moment.
go reloader.advertiseHeartbeat(advertiseAddress)
return reloader, nil
}
// advertiseHeartbeat registers the current node against the discovery backend using the specified
// address. The function never returns, as registration against the backend comes with a TTL and
// requires regular heartbeats.
func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
var ready bool
if err := d.initHeartbeat(address); err == nil {
ready = true
close(d.readyCh)
} else {
logrus.WithError(err).Debug("First discovery heartbeat failed")
}
for {
select {
case <-d.ticker.C:
if err := d.backend.Register(address); err != nil {
logrus.Warnf("Registering as %q in discovery failed: %v", address, err)
} else {
if !ready {
close(d.readyCh)
ready = true
}
}
case <-d.term:
return
}
}
}
// initHeartbeat is used to do the first heartbeat. It uses a tight loop until
// either the timeout period is reached or the heartbeat is successful and returns.
func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
// Setup a short ticker until the first heartbeat has succeeded
t := time.NewTicker(500 * time.Millisecond)
defer t.Stop()
// timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
timeout := time.After(60 * time.Second)
for {
select {
case <-timeout:
return errors.New("timeout waiting for initial discovery")
case <-d.term:
return errors.New("terminated")
case <-t.C:
if err := d.backend.Register(address); err == nil {
return nil
}
}
}
}
// Reload makes the watcher stop advertising and reconfigures it to advertise at a new address.
func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
d.Stop()
heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
if err != nil {
return err
}
d.backend = backend
d.ticker = time.NewTicker(heartbeat)
d.readyCh = make(chan struct{})
go d.advertiseHeartbeat(advertiseAddress)
return nil
}
// Stop terminates the discovery advertising.
func (d *daemonDiscoveryReloader) Stop() {
d.ticker.Stop()
d.term <- true
}
func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) {
heartbeat, ttl, err := discoveryOpts(clusterOpts)
if err != nil {
return 0, nil, err
}
backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts)
if err != nil {
return 0, nil, err
}
return heartbeat, backend, nil
}

View file

@ -0,0 +1,96 @@
package discovery // import "github.com/docker/docker/daemon/discovery"
import (
"fmt"
"testing"
"time"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestDiscoveryOptsErrors(t *testing.T) {
var testcases = []struct {
doc string
opts map[string]string
}{
{
doc: "discovery.ttl < discovery.heartbeat",
opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"},
},
{
doc: "discovery.ttl == discovery.heartbeat",
opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"},
},
{
doc: "negative discovery.heartbeat",
opts: map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"},
},
{
doc: "negative discovery.ttl",
opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"},
},
{
doc: "invalid discovery.heartbeat",
opts: map[string]string{"discovery.heartbeat": "invalid"},
},
{
doc: "invalid discovery.ttl",
opts: map[string]string{"discovery.ttl": "invalid"},
},
}
for _, testcase := range testcases {
_, _, err := discoveryOpts(testcase.opts)
assert.Check(t, is.ErrorContains(err, ""), testcase.doc)
}
}
func TestDiscoveryOpts(t *testing.T) {
clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"}
heartbeat, ttl, err := discoveryOpts(clusterOpts)
assert.NilError(t, err)
assert.Check(t, is.Equal(10*time.Second, heartbeat))
assert.Check(t, is.Equal(20*time.Second, ttl))
clusterOpts = map[string]string{"discovery.heartbeat": "10"}
heartbeat, ttl, err = discoveryOpts(clusterOpts)
assert.NilError(t, err)
assert.Check(t, is.Equal(10*time.Second, heartbeat))
assert.Check(t, is.Equal(10*defaultDiscoveryTTLFactor*time.Second, ttl))
clusterOpts = map[string]string{"discovery.ttl": "30"}
heartbeat, ttl, err = discoveryOpts(clusterOpts)
assert.NilError(t, err)
if ttl != 30*time.Second {
t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl)
}
expected := 30 * time.Second / defaultDiscoveryTTLFactor
if heartbeat != expected {
t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat)
}
discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
clusterOpts = map[string]string{"discovery.ttl": discoveryTTL}
heartbeat, _, err = discoveryOpts(clusterOpts)
if err == nil && heartbeat == 0 {
t.Fatal("discovery.heartbeat must be positive")
}
clusterOpts = map[string]string{}
heartbeat, ttl, err = discoveryOpts(clusterOpts)
if err != nil {
t.Fatal(err)
}
if heartbeat != defaultDiscoveryHeartbeat {
t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat)
}
expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor
if ttl != expected {
t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
}
}

Some files were not shown because too many files have changed in this diff