update vendor

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-09-25 12:27:46 -04:00
parent 19a32db84d
commit 94d1cfbfbf
10501 changed files with 2307943 additions and 29279 deletions


@@ -0,0 +1,876 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"archive/tar"
"compress/gzip"
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/distribution"
progressutils "github.com/docker/docker/distribution/utils"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin/v2"
refstore "github.com/docker/docker/reference"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var acceptedPluginFilterTags = map[string]bool{
"enabled": true,
"capability": true,
}
// Disable deactivates a plugin. This means resources (volumes, networks) can no longer use it.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return err
}
pm.mu.RLock()
c := pm.cMap[p]
pm.mu.RUnlock()
if !config.ForceDisable && p.GetRefCount() > 0 {
return errors.WithStack(inUseError(p.Name()))
}
for _, typ := range p.GetTypes() {
if typ.Capability == authorization.AuthZApiImplements {
pm.config.AuthzMiddleware.RemovePlugin(p.Name())
}
}
if err := pm.disable(p, c); err != nil {
return err
}
pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
return nil
}
// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return err
}
c := &controller{timeoutInSecs: config.Timeout}
if err := pm.enable(p, c, false); err != nil {
return err
}
pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
return nil
}
// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return nil, err
}
return &p.PluginObj, nil
}
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
if outStream != nil {
// Include a buffer so that slow client connections don't affect
// transfer performance.
progressChan := make(chan progress.Progress, 100)
writesDone := make(chan struct{})
defer func() {
close(progressChan)
<-writesDone
}()
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
go func() {
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()
config.ProgressOutput = progress.ChanOutput(progressChan)
} else {
config.ProgressOutput = progress.DiscardOutput()
}
return distribution.Pull(ctx, ref, config)
}
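// A note on the pattern above: WriteDistributionProgress drains progressChan
// to outStream until the channel is closed. Closing the channel in the
// deferred func and then blocking on writesDone ensures every progress
// message is flushed before pull returns, while the 100-entry buffer keeps a
// slow client connection from stalling the transfer itself.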
type tempConfigStore struct {
config []byte
configDigest digest.Digest
}
func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
dgst := digest.FromBytes(c)
s.config = c
s.configDigest = dgst
return dgst, nil
}
func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
if d != s.configDigest {
return nil, errNotFound("digest not found")
}
return s.config, nil
}
func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
func (s *tempConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) {
// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
return &specs.Platform{OS: runtime.GOOS}, nil
}
func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
var privileges types.PluginPrivileges
if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
privileges = append(privileges, types.PluginPrivilege{
Name: "network",
Description: "permissions to access a network",
Value: []string{c.Network.Type},
})
}
if c.IpcHost {
privileges = append(privileges, types.PluginPrivilege{
Name: "host ipc namespace",
Description: "allow access to host ipc namespace",
Value: []string{"true"},
})
}
if c.PidHost {
privileges = append(privileges, types.PluginPrivilege{
Name: "host pid namespace",
Description: "allow access to host pid namespace",
Value: []string{"true"},
})
}
for _, mount := range c.Mounts {
if mount.Source != nil {
privileges = append(privileges, types.PluginPrivilege{
Name: "mount",
Description: "host path to mount",
Value: []string{*mount.Source},
})
}
}
for _, device := range c.Linux.Devices {
if device.Path != nil {
privileges = append(privileges, types.PluginPrivilege{
Name: "device",
Description: "host device to access",
Value: []string{*device.Path},
})
}
}
if c.Linux.AllowAllDevices {
privileges = append(privileges, types.PluginPrivilege{
Name: "allow-all-devices",
Description: "allow 'rwm' access to all devices",
Value: []string{"true"},
})
}
if len(c.Linux.Capabilities) > 0 {
privileges = append(privileges, types.PluginPrivilege{
Name: "capabilities",
Description: "list of additional capabilities required",
Value: c.Linux.Capabilities,
})
}
return privileges
}
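// Illustrative sketch of computePrivileges (not part of the original source):
// a config that uses a custom network type and requests an extra capability
// yields one privilege entry per sensitive feature.
//
//	var cfg types.PluginConfig
//	cfg.Network.Type = "host"
//	cfg.Linux.Capabilities = []string{"CAP_SYS_ADMIN"}
//	privs := computePrivileges(cfg)
//	// privs now holds a "network" privilege with Value ["host"] and a
//	// "capabilities" privilege with Value ["CAP_SYS_ADMIN"].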
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
// create image store instance
cs := &tempConfigStore{}
// No DownloadManager is set because we are only pulling the configuration.
pluginPullConfig := &distribution.ImagePullConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
RegistryService: pm.config.RegistryService,
ImageEventLogger: func(string, string, string) {},
ImageStore: cs,
},
Schema2Types: distribution.PluginTypes,
}
if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
return nil, err
}
if cs.config == nil {
return nil, errors.New("no configuration pulled")
}
var config types.PluginConfig
if err := json.Unmarshal(cs.config, &config); err != nil {
return nil, errdefs.System(err)
}
return computePrivileges(config), nil
}
// Upgrade upgrades an installed plugin to the given reference.
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
p, err := pm.config.Store.GetV2Plugin(name)
if err != nil {
return err
}
if p.IsEnabled() {
return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
}
pm.muGC.RLock()
defer pm.muGC.RUnlock()
// revalidate because Pull is public
if _, err := reference.ParseNormalizedNamed(name); err != nil {
return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
}
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
if err != nil {
return errors.Wrap(errdefs.System(err), "error preparing upgrade")
}
defer os.RemoveAll(tmpRootFSDir)
dm := &downloadManager{
tmpDir: tmpRootFSDir,
blobStore: pm.blobStore,
}
pluginPullConfig := &distribution.ImagePullConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
RegistryService: pm.config.RegistryService,
ImageEventLogger: pm.config.LogPluginEvent,
ImageStore: dm,
},
DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
Schema2Types: distribution.PluginTypes,
}
err = pm.pull(ctx, ref, pluginPullConfig, outStream)
if err != nil {
go pm.GC()
return err
}
if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
return err
}
p.PluginObj.PluginReference = ref.String()
return nil
}
// Pull pulls a plugin, checks that the correct privileges were provided, and installs the plugin.
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
pm.muGC.RLock()
defer pm.muGC.RUnlock()
// revalidate because Pull is public
nameref, err := reference.ParseNormalizedNamed(name)
if err != nil {
return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
}
name = reference.FamiliarString(reference.TagNameOnly(nameref))
if err := pm.config.Store.validateName(name); err != nil {
return errdefs.InvalidParameter(err)
}
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
if err != nil {
return errors.Wrap(errdefs.System(err), "error preparing pull")
}
defer os.RemoveAll(tmpRootFSDir)
dm := &downloadManager{
tmpDir: tmpRootFSDir,
blobStore: pm.blobStore,
}
pluginPullConfig := &distribution.ImagePullConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
RegistryService: pm.config.RegistryService,
ImageEventLogger: pm.config.LogPluginEvent,
ImageStore: dm,
},
DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
Schema2Types: distribution.PluginTypes,
}
err = pm.pull(ctx, ref, pluginPullConfig, outStream)
if err != nil {
go pm.GC()
return err
}
refOpt := func(p *v2.Plugin) {
p.PluginObj.PluginReference = ref.String()
}
optsList := make([]CreateOpt, 0, len(opts)+1)
optsList = append(optsList, opts...)
optsList = append(optsList, refOpt)
p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...)
if err != nil {
return err
}
pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
return nil
}
// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
return nil, err
}
enabledOnly := false
disabledOnly := false
if pluginFilters.Contains("enabled") {
if pluginFilters.ExactMatch("enabled", "true") {
enabledOnly = true
} else if pluginFilters.ExactMatch("enabled", "false") {
disabledOnly = true
} else {
return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")}
}
}
plugins := pm.config.Store.GetAll()
out := make([]types.Plugin, 0, len(plugins))
next:
for _, p := range plugins {
if enabledOnly && !p.PluginObj.Enabled {
continue
}
if disabledOnly && p.PluginObj.Enabled {
continue
}
if pluginFilters.Contains("capability") {
for _, f := range p.GetTypes() {
if !pluginFilters.Match("capability", f.Capability) {
continue next
}
}
}
out = append(out, p.PluginObj)
}
return out, nil
}
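// Illustrative call (a sketch, not from the original source), using the
// filter keys accepted above:
//
//	f := filters.NewArgs()
//	f.Add("enabled", "true")
//	f.Add("capability", "volumedriver")
//	enabledVolumePlugins, err := pm.List(f)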
// Push pushes a plugin to the registry.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
p, err := pm.config.Store.GetV2Plugin(name)
if err != nil {
return err
}
ref, err := reference.ParseNormalizedNamed(p.Name())
if err != nil {
return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
}
var po progress.Output
if outStream != nil {
// Include a buffer so that slow client connections don't affect
// transfer performance.
progressChan := make(chan progress.Progress, 100)
writesDone := make(chan struct{})
defer func() {
close(progressChan)
<-writesDone
}()
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
go func() {
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()
po = progress.ChanOutput(progressChan)
} else {
po = progress.DiscardOutput()
}
// TODO: replace these with manager
is := &pluginConfigStore{
pm: pm,
plugin: p,
}
lss := make(map[string]distribution.PushLayerProvider)
lss[runtime.GOOS] = &pluginLayerProvider{
pm: pm,
plugin: p,
}
rs := &pluginReference{
name: ref,
pluginID: p.Config,
}
uploadManager := xfer.NewLayerUploadManager(3)
imagePushConfig := &distribution.ImagePushConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
ProgressOutput: po,
RegistryService: pm.config.RegistryService,
ReferenceStore: rs,
ImageEventLogger: pm.config.LogPluginEvent,
ImageStore: is,
RequireSchema2: true,
},
ConfigMediaType: schema2.MediaTypePluginConfig,
LayerStores: lss,
UploadManager: uploadManager,
}
return distribution.Push(ctx, ref, imagePushConfig)
}
type pluginReference struct {
name reference.Named
pluginID digest.Digest
}
func (r *pluginReference) References(id digest.Digest) []reference.Named {
if r.pluginID != id {
return nil
}
return []reference.Named{r.name}
}
func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association {
return []refstore.Association{
{
Ref: r.name,
ID: r.pluginID,
},
}
}
func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
if r.name.String() != ref.String() {
return digest.Digest(""), refstore.ErrDoesNotExist
}
return r.pluginID, nil
}
func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
// Read only, ignore
return nil
}
func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
// Read only, ignore
return nil
}
func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
// Read only, ignore
return false, nil
}
type pluginConfigStore struct {
pm *Manager
plugin *v2.Plugin
}
func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
return digest.Digest(""), errors.New("cannot store config on push")
}
func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
if s.plugin.Config != d {
return nil, errors.New("plugin not found")
}
rwc, err := s.pm.blobStore.Get(d)
if err != nil {
return nil, err
}
defer rwc.Close()
return ioutil.ReadAll(rwc)
}
func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
func (s *pluginConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) {
// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
return &specs.Platform{OS: runtime.GOOS}, nil
}
type pluginLayerProvider struct {
pm *Manager
plugin *v2.Plugin
}
func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
var i int
for i = 1; i <= len(rootFS.DiffIDs); i++ {
if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
break
}
}
if i > len(rootFS.DiffIDs) {
return nil, errors.New("layer not found")
}
return &pluginLayer{
pm: p.pm,
diffIDs: rootFS.DiffIDs[:i],
blobs: p.plugin.Blobsums[:i],
}, nil
}
type pluginLayer struct {
pm *Manager
diffIDs []layer.DiffID
blobs []digest.Digest
}
func (l *pluginLayer) ChainID() layer.ChainID {
return layer.CreateChainID(l.diffIDs)
}
func (l *pluginLayer) DiffID() layer.DiffID {
return l.diffIDs[len(l.diffIDs)-1]
}
func (l *pluginLayer) Parent() distribution.PushLayer {
if len(l.diffIDs) == 1 {
return nil
}
return &pluginLayer{
pm: l.pm,
diffIDs: l.diffIDs[:len(l.diffIDs)-1],
blobs: l.blobs[:len(l.diffIDs)-1],
}
}
func (l *pluginLayer) Open() (io.ReadCloser, error) {
return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
}
func (l *pluginLayer) Size() (int64, error) {
return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
}
func (l *pluginLayer) MediaType() string {
return schema2.MediaTypeLayer
}
func (l *pluginLayer) Release() {
// Nothing needs to be released; no references are held
}
// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
p, err := pm.config.Store.GetV2Plugin(name)
pm.mu.RLock()
c := pm.cMap[p]
pm.mu.RUnlock()
if err != nil {
return err
}
if !config.ForceRemove {
if p.GetRefCount() > 0 {
return inUseError(p.Name())
}
if p.IsEnabled() {
return enabledError(p.Name())
}
}
if p.IsEnabled() {
if err := pm.disable(p, c); err != nil {
logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
}
}
defer func() {
go pm.GC()
}()
id := p.GetID()
pluginDir := filepath.Join(pm.config.Root, id)
if err := mount.RecursiveUnmount(pluginDir); err != nil {
return errors.Wrap(err, "error unmounting plugin data")
}
if err := atomicRemoveAll(pluginDir); err != nil {
return err
}
pm.config.Store.Remove(p)
pm.config.LogPluginEvent(id, name, "remove")
pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
return nil
}
// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
p, err := pm.config.Store.GetV2Plugin(name)
if err != nil {
return err
}
if err := p.Set(args); err != nil {
return err
}
return pm.save(p)
}
// CreateFromContext creates a plugin from the given tar context, which must
// contain both the rootfs and the config.json, and tags it with the given
// repoName (with optional tag).
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
pm.muGC.RLock()
defer pm.muGC.RUnlock()
ref, err := reference.ParseNormalizedNamed(options.RepoName)
if err != nil {
return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
}
if _, ok := ref.(reference.Canonical); ok {
return errors.Errorf("canonical references are not permitted")
}
name := reference.FamiliarString(reference.TagNameOnly(ref))
if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
return err
}
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
if err != nil {
return errors.Wrap(err, "failed to create temp directory")
}
defer os.RemoveAll(tmpRootFSDir)
var configJSON []byte
rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)
rootFSBlob, err := pm.blobStore.New()
if err != nil {
return err
}
defer rootFSBlob.Close()
gzw := gzip.NewWriter(rootFSBlob)
layerDigester := digest.Canonical.Digester()
rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))
if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
return err
}
if err := rootFS.Close(); err != nil {
return err
}
if configJSON == nil {
return errors.New("config not found")
}
if err := gzw.Close(); err != nil {
return errors.Wrap(err, "error closing gzip writer")
}
var config types.PluginConfig
if err := json.Unmarshal(configJSON, &config); err != nil {
return errors.Wrap(err, "failed to parse config")
}
if err := pm.validateConfig(config); err != nil {
return err
}
pm.mu.Lock()
defer pm.mu.Unlock()
rootFSBlobsum, err := rootFSBlob.Commit()
if err != nil {
return err
}
defer func() {
if err != nil {
go pm.GC()
}
}()
config.Rootfs = &types.PluginConfigRootfs{
Type: "layers",
DiffIds: []string{layerDigester.Digest().String()},
}
config.DockerVersion = dockerversion.Version
configBlob, err := pm.blobStore.New()
if err != nil {
return err
}
defer configBlob.Close()
if err := json.NewEncoder(configBlob).Encode(config); err != nil {
return errors.Wrap(err, "error encoding json config")
}
configBlobsum, err := configBlob.Commit()
if err != nil {
return err
}
p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
if err != nil {
return err
}
p.PluginObj.PluginReference = name
pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")
return nil
}
func (pm *Manager) validateConfig(config types.PluginConfig) error {
return nil // TODO:
}
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
pr, pw := io.Pipe()
go func() {
tarReader := tar.NewReader(in)
tarWriter := tar.NewWriter(pw)
defer in.Close()
hasRootFS := false
for {
hdr, err := tarReader.Next()
if err == io.EOF {
if !hasRootFS {
pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
return
}
// Signals end of archive.
tarWriter.Close()
pw.Close()
return
}
if err != nil {
pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
return
}
content := io.Reader(tarReader)
name := path.Clean(hdr.Name)
if path.IsAbs(name) {
name = name[1:]
}
if name == configFileName {
dt, err := ioutil.ReadAll(content)
if err != nil {
pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
return
}
*config = dt
}
if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
hdr.Name = path.Clean(path.Join(parts[1:]...))
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
}
if err := tarWriter.WriteHeader(hdr); err != nil {
pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
return
}
if _, err := pools.Copy(tarWriter, content); err != nil {
pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
return
}
hasRootFS = true
} else {
io.Copy(ioutil.Discard, content)
}
}
}()
return pr
}
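// The build context consumed above is expected to look like this (sketch):
// a config.json at the root, captured into *config, and a rootfs/ directory
// whose entries are re-rooted (the "rootfs/" prefix is stripped) and streamed
// out as the layer tar.
//
//	config.json
//	rootfs/etc/...
//	rootfs/usr/bin/my-plugin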
func atomicRemoveAll(dir string) error {
renamed := dir + "-removing"
err := os.Rename(dir, renamed)
switch {
case os.IsNotExist(err), err == nil:
// even if `dir` doesn't exist, we can still try and remove `renamed`
case os.IsExist(err):
// Some previous remove failed, check if the origin dir exists
if e := system.EnsureRemoveAll(renamed); e != nil {
return errors.Wrap(err, "rename target already exists and could not be removed")
}
if _, err := os.Stat(dir); os.IsNotExist(err) {
// origin doesn't exist, nothing left to do
return nil
}
// attempt to rename again
if err := os.Rename(dir, renamed); err != nil {
return errors.Wrap(err, "failed to rename dir for atomic removal")
}
default:
return errors.Wrap(err, "failed to rename dir for atomic removal")
}
if err := system.EnsureRemoveAll(renamed); err != nil {
os.Rename(renamed, dir)
return err
}
return nil
}
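// A note on the technique above: because os.Rename is atomic on the same
// filesystem, observers never see a half-deleted dir; either `dir` exists
// intact or only `dir`+"-removing" does, and a leftover "-removing" dir from
// a crashed removal is retried on the next call.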


@@ -0,0 +1,81 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func TestAtomicRemoveAllNormal(t *testing.T) {
dir, err := ioutil.TempDir("", "atomic-remove-with-normal")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // just try to make sure this gets cleaned up
if err := atomicRemoveAll(dir); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(dir); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
if _, err := os.Stat(dir + "-removing"); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
}
func TestAtomicRemoveAllAlreadyExists(t *testing.T) {
dir, err := ioutil.TempDir("", "atomic-remove-already-exists")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // just try to make sure this gets cleaned up
if err := os.MkdirAll(dir+"-removing", 0755); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir + "-removing")
if err := atomicRemoveAll(dir); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(dir); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
if _, err := os.Stat(dir + "-removing"); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
}
func TestAtomicRemoveAllNotExist(t *testing.T) {
if err := atomicRemoveAll("/not-exist"); err != nil {
t.Fatal(err)
}
dir, err := ioutil.TempDir("", "atomic-remove-already-exists")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // just try to make sure this gets cleaned up
// create the removing dir, but not the "real" one
foo := filepath.Join(dir, "foo")
removing := dir + "-removing"
if err := os.MkdirAll(removing, 0755); err != nil {
t.Fatal(err)
}
if err := atomicRemoveAll(dir); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(foo); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
if _, err := os.Stat(removing); !os.IsNotExist(err) {
t.Fatalf("dir should be gone: %v", err)
}
}


@@ -0,0 +1,72 @@
// +build !linux

package plugin // import "github.com/docker/docker/plugin"
import (
"context"
"errors"
"io"
"net/http"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
var errNotSupported = errors.New("plugins are not supported on this platform")
// Disable deactivates a plugin, which implies that it cannot be used by containers.
func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error {
return errNotSupported
}
// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
return errNotSupported
}
// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
return nil, errNotSupported
}
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
return nil, errNotSupported
}
// Pull pulls a plugin, checks that the correct privileges were provided, and installs the plugin.
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer, opts ...CreateOpt) error {
return errNotSupported
}
// Upgrade pulls a plugin, checks that the correct privileges were provided, and upgrades the installed plugin.
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error {
return errNotSupported
}
// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
return nil, errNotSupported
}
// Push pushes a plugin to the registry.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error {
return errNotSupported
}
// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
return errNotSupported
}
// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
return errNotSupported
}
// CreateFromContext creates a plugin from the given pluginDir which contains
// both the rootfs and the config.json and a repoName with optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error {
return errNotSupported
}


@@ -0,0 +1,190 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/progress"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type blobstore interface {
New() (WriteCommitCloser, error)
Get(dgst digest.Digest) (io.ReadCloser, error)
Size(dgst digest.Digest) (int64, error)
}
type basicBlobStore struct {
path string
}
func newBasicBlobStore(p string) (*basicBlobStore, error) {
tmpdir := filepath.Join(p, "tmp")
if err := os.MkdirAll(tmpdir, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", tmpdir)
}
return &basicBlobStore{path: p}, nil
}
func (b *basicBlobStore) New() (WriteCommitCloser, error) {
f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion")
if err != nil {
return nil, errors.Wrap(err, "failed to create temp file")
}
return newInsertion(f), nil
}
func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
}
func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
if err != nil {
return 0, err
}
return stat.Size(), nil
}
func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) {
for _, alg := range []string{string(digest.Canonical)} {
items, err := ioutil.ReadDir(filepath.Join(b.path, alg))
if err != nil {
continue
}
for _, fi := range items {
if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists {
p := filepath.Join(b.path, alg, fi.Name())
err := os.RemoveAll(p)
logrus.Debugf("cleaned up blob %v: %v", p, err)
}
}
}
}
// WriteCommitCloser defines object that can be committed to blobstore.
type WriteCommitCloser interface {
io.WriteCloser
Commit() (digest.Digest, error)
}
type insertion struct {
io.Writer
f *os.File
digester digest.Digester
closed bool
}
func newInsertion(tempFile *os.File) *insertion {
digester := digest.Canonical.Digester()
return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())}
}
func (i *insertion) Commit() (digest.Digest, error) {
p := i.f.Name()
d := filepath.Join(p, "../..")
i.f.Sync()
defer os.RemoveAll(p)
if err := i.f.Close(); err != nil {
return "", err
}
i.closed = true
dgst := i.digester.Digest()
if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil {
return "", errors.Wrapf(err, "failed to mkdir %v", d)
}
if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil {
return "", errors.Wrapf(err, "failed to rename %v", p)
}
return dgst, nil
}
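// On-disk layout produced by this blob store (sketch): writes land in a temp
// file and are only renamed into the content-addressed tree on Commit, so a
// crash never leaves a partially written blob under its digest.
//
//	<blobs>/tmp/.insertionXXXX   in-flight writes
//	<blobs>/sha256/<hex>         committed blobs, keyed by digest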
func (i *insertion) Close() error {
if i.closed {
return nil
}
defer os.RemoveAll(i.f.Name())
return i.f.Close()
}
type downloadManager struct {
blobStore blobstore
tmpDir string
blobs []digest.Digest
configDigest digest.Digest
}
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
for _, l := range layers {
b, err := dm.blobStore.New()
if err != nil {
return initialRootFS, nil, err
}
defer b.Close()
rc, _, err := l.Download(ctx, progressOutput)
if err != nil {
return initialRootFS, nil, errors.Wrap(err, "failed to download")
}
defer rc.Close()
r := io.TeeReader(rc, b)
inflatedLayerData, err := archive.DecompressStream(r)
if err != nil {
return initialRootFS, nil, err
}
defer inflatedLayerData.Close()
digester := digest.Canonical.Digester()
if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
return initialRootFS, nil, err
}
initialRootFS.Append(layer.DiffID(digester.Digest()))
d, err := b.Commit()
if err != nil {
return initialRootFS, nil, err
}
dm.blobs = append(dm.blobs, d)
}
return initialRootFS, nil, nil
}
func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
b, err := dm.blobStore.New()
if err != nil {
return "", err
}
defer b.Close()
n, err := b.Write(dt)
if err != nil {
return "", err
}
if n != len(dt) {
return "", io.ErrShortWrite
}
d, err := b.Commit()
dm.configDigest = d
return d, err
}
func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
return nil, fmt.Errorf("digest not found")
}
func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
func (dm *downloadManager) PlatformFromConfig(c []byte) (*specs.Platform, error) {
// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
return &specs.Platform{OS: runtime.GOOS}, nil
}


@@ -0,0 +1,50 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"sync"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/plugin/v2"
"github.com/opencontainers/runtime-spec/specs-go"
)
// Store manages the plugin inventory in memory and on disk
type Store struct {
sync.RWMutex
plugins map[string]*v2.Plugin
specOpts map[string][]SpecOpt
/* handlers are necessary for transition path of legacy plugins
* to the new model. Legacy plugins use Handle() for registering an
* activation callback.*/
handlers map[string][]func(string, *plugins.Client)
}
// NewStore creates a Store.
func NewStore() *Store {
return &Store{
plugins: make(map[string]*v2.Plugin),
specOpts: make(map[string][]SpecOpt),
handlers: make(map[string][]func(string, *plugins.Client)),
}
}
// SpecOpt is used for subsystems that need to modify the runtime spec of a plugin
type SpecOpt func(*specs.Spec)
// CreateOpt is used to configure specific plugin details when created
type CreateOpt func(p *v2.Plugin)
// WithSwarmService is a CreateOpt that flags the passed-in plugin as a plugin
// managed by swarm
func WithSwarmService(id string) CreateOpt {
return func(p *v2.Plugin) {
p.SwarmServiceID = id
}
}
// WithSpecMounts is a SpecOpt which appends the provided mounts to the runtime spec
func WithSpecMounts(mounts []specs.Mount) SpecOpt {
return func(s *specs.Spec) {
s.Mounts = append(s.Mounts, mounts...)
}
}
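// Illustrative composition of the option types above (a sketch; the names
// mirror the createPlugin call used by Pull):
//
//	opts := []CreateOpt{WithSwarmService("my-swarm-service-id")}
//	p, err := pm.createPlugin(name, configDigest, blobs, tmpRootFSDir, nil, opts...)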


@@ -0,0 +1,66 @@
package plugin // import "github.com/docker/docker/plugin"
import "fmt"
type errNotFound string
func (name errNotFound) Error() string {
return fmt.Sprintf("plugin %q not found", string(name))
}
func (errNotFound) NotFound() {}
type errAmbiguous string
func (name errAmbiguous) Error() string {
return fmt.Sprintf("multiple plugins found for %q", string(name))
}
func (name errAmbiguous) InvalidParameter() {}
type errDisabled string
func (name errDisabled) Error() string {
return fmt.Sprintf("plugin %s found but disabled", string(name))
}
func (name errDisabled) Conflict() {}
type invalidFilter struct {
filter string
value []string
}
func (e invalidFilter) Error() string {
msg := "Invalid filter '" + e.filter
if len(e.value) > 0 {
msg += fmt.Sprintf("=%s", e.value)
}
return msg + "'"
}
func (invalidFilter) InvalidParameter() {}
type inUseError string
func (e inUseError) Error() string {
return "plugin " + string(e) + " is in use"
}
func (inUseError) Conflict() {}
type enabledError string
func (e enabledError) Error() string {
return "plugin " + string(e) + " is enabled"
}
func (enabledError) Conflict() {}
type alreadyExistsError string
func (e alreadyExistsError) Error() string {
return "plugin " + string(e) + " already exists"
}
func (alreadyExistsError) Conflict() {}
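// A note on the pattern above: these error types carry no state beyond the
// plugin name; the NotFound(), Conflict() and InvalidParameter() marker
// methods are what the daemon's errdefs package inspects when mapping an
// error to an HTTP status code. For example:
//
//	var err error = errNotFound("foo")
//	_, isNotFound := err.(interface{ NotFound() }) // true -> 404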


@@ -0,0 +1,111 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"fmt"
"reflect"
"github.com/docker/docker/api/types"
)
// Event is emitted for actions performed on the plugin manager
type Event interface {
matches(Event) bool
}
// EventCreate is an event which is emitted when a plugin is created
// This happens either via pull or via create from context.
//
// Use the `Interfaces` field to match only plugins that implement a specific
// interface.
// These are matched against using "or" logic.
// If no interfaces are listed, all are matched.
type EventCreate struct {
Interfaces map[string]bool
Plugin types.Plugin
}
func (e EventCreate) matches(observed Event) bool {
oe, ok := observed.(EventCreate)
if !ok {
return false
}
if len(e.Interfaces) == 0 {
return true
}
var ifaceMatch bool
for _, in := range oe.Plugin.Config.Interface.Types {
if e.Interfaces[in.Capability] {
ifaceMatch = true
break
}
}
return ifaceMatch
}
// EventRemove is an event which is emitted when a plugin is removed
// It matches on the passed-in plugin's ID only.
type EventRemove struct {
Plugin types.Plugin
}
func (e EventRemove) matches(observed Event) bool {
oe, ok := observed.(EventRemove)
if !ok {
return false
}
return e.Plugin.ID == oe.Plugin.ID
}
// EventDisable is an event that is emitted when a plugin is disabled
// It matches on the passed-in plugin's ID only.
type EventDisable struct {
Plugin types.Plugin
}
func (e EventDisable) matches(observed Event) bool {
oe, ok := observed.(EventDisable)
if !ok {
return false
}
return e.Plugin.ID == oe.Plugin.ID
}
// EventEnable is an event that is emitted when a plugin is enabled
// It matches on the passed-in plugin's ID only.
type EventEnable struct {
Plugin types.Plugin
}
func (e EventEnable) matches(observed Event) bool {
oe, ok := observed.(EventEnable)
if !ok {
return false
}
return e.Plugin.ID == oe.Plugin.ID
}
// SubscribeEvents provides an event channel to listen for structured events from
// the plugin manager's actions (CRUD operations).
// The caller must call the returned `cancel()` function once done with the channel
// or this will leak resources.
func (pm *Manager) SubscribeEvents(buffer int, watchEvents ...Event) (eventCh <-chan interface{}, cancel func()) {
topic := func(i interface{}) bool {
observed, ok := i.(Event)
if !ok {
panic(fmt.Sprintf("unexpected type passed to event channel: %v", reflect.TypeOf(i)))
}
for _, e := range watchEvents {
if e.matches(observed) {
return true
}
}
// If no specific events are specified always assume a matched event
// If some events were specified and none matched above, then the event
// doesn't match
return watchEvents == nil
}
ch := pm.publisher.SubscribeTopicWithBuffer(topic, buffer)
cancelFunc := func() { pm.publisher.Evict(ch) }
return ch, cancelFunc
}
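// Illustrative subscription (a sketch, not from the original source):
// receive an event whenever a volumedriver plugin is created.
//
//	ch, cancel := pm.SubscribeEvents(1, EventCreate{
//		Interfaces: map[string]bool{"volumedriver": true},
//	})
//	defer cancel()
//	ev := <-ch // an EventCreate for a volumedriver plugin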


@@ -0,0 +1,177 @@
package containerd // import "github.com/docker/docker/plugin/executor/containerd"
import (
"context"
"io"
"path/filepath"
"sync"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/runtime/linux/runctypes"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PluginNamespace is the name used for the plugins namespace
const PluginNamespace = "plugins.moby"
// ExitHandler represents an object that is called when the exit event is received from containerd
type ExitHandler interface {
HandleExitEvent(id string) error
}
// Client is used by the executor to perform operations.
// TODO(@cpuguy83): This should really just be based off the containerd client interface.
// However right now this whole package is tied to github.com/docker/docker/libcontainerd
type Client interface {
Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error
Restore(ctx context.Context, containerID string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error)
Status(ctx context.Context, containerID string) (libcontainerd.Status, error)
Delete(ctx context.Context, containerID string) error
DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error)
Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error)
SignalProcess(ctx context.Context, containerID, processID string, signal int) error
}
// New creates a new containerd plugin executor
func New(ctx context.Context, rootDir string, cli *containerd.Client, exitHandler ExitHandler) (*Executor, error) {
e := &Executor{
rootDir: rootDir,
exitHandler: exitHandler,
}
client, err := libcontainerd.NewClient(ctx, cli, rootDir, PluginNamespace, e)
if err != nil {
return nil, errors.Wrap(err, "error creating containerd exec client")
}
e.client = client
return e, nil
}
// Executor is the containerd client implementation of a plugin executor
type Executor struct {
rootDir string
client Client
exitHandler ExitHandler
}
// deleteTaskAndContainer deletes the plugin task and then the plugin container from containerd
func deleteTaskAndContainer(ctx context.Context, cli Client, id string) {
_, _, err := cli.DeleteTask(ctx, id)
if err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("id", id).Error("failed to delete plugin task from containerd")
}
err = cli.Delete(ctx, id)
if err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("id", id).Error("failed to delete plugin container from containerd")
}
}
// Create creates a new container
func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
opts := runctypes.RuncOptions{
RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"),
}
ctx := context.Background()
err := e.client.Create(ctx, id, &spec, &opts)
if err != nil {
status, err2 := e.client.Status(ctx, id)
if err2 != nil {
if !errdefs.IsNotFound(err2) {
logrus.WithError(err2).WithField("id", id).Warn("Received an error while attempting to read plugin status")
}
} else {
if status != libcontainerd.StatusRunning && status != libcontainerd.StatusUnknown {
if err2 := e.client.Delete(ctx, id); err2 != nil && !errdefs.IsNotFound(err2) {
logrus.WithError(err2).WithField("plugin", id).Error("Error cleaning up containerd container")
}
err = e.client.Create(ctx, id, &spec, &opts)
}
}
if err != nil {
return errors.Wrap(err, "error creating containerd container")
}
}
_, err = e.client.Start(ctx, id, "", false, attachStreamsFunc(stdout, stderr))
if err != nil {
deleteTaskAndContainer(ctx, e.client, id)
}
return err
}
// Restore restores a container
func (e *Executor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) {
alive, _, err := e.client.Restore(context.Background(), id, attachStreamsFunc(stdout, stderr))
if err != nil && !errdefs.IsNotFound(err) {
return false, err
}
if !alive {
deleteTaskAndContainer(context.Background(), e.client, id)
}
return alive, nil
}
// IsRunning reports whether the container with the given id is running
func (e *Executor) IsRunning(id string) (bool, error) {
status, err := e.client.Status(context.Background(), id)
return status == libcontainerd.StatusRunning, err
}
// Signal sends the specified signal to the container
func (e *Executor) Signal(id string, signal int) error {
return e.client.SignalProcess(context.Background(), id, libcontainerd.InitProcessName, signal)
}
// ProcessEvent handles events from containerd
// All events are ignored except the exit event, which is sent off to the stored handler
func (e *Executor) ProcessEvent(id string, et libcontainerd.EventType, ei libcontainerd.EventInfo) error {
switch et {
case libcontainerd.EventExit:
deleteTaskAndContainer(context.Background(), e.client, id)
return e.exitHandler.HandleExitEvent(ei.ContainerID)
}
return nil
}
type rio struct {
cio.IO
wg sync.WaitGroup
}
func (c *rio) Wait() {
c.wg.Wait()
c.IO.Wait()
}
func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback {
return func(iop *cio.DirectIO) (cio.IO, error) {
if iop.Stdin != nil {
iop.Stdin.Close()
// closing stdin shouldn't be needed here, it should never be open
panic("plugin stdin shouldn't have been created!")
}
rio := &rio{IO: iop}
rio.wg.Add(2)
go func() {
io.Copy(stdout, iop.Stdout)
stdout.Close()
rio.wg.Done()
}()
go func() {
io.Copy(stderr, iop.Stderr)
stderr.Close()
rio.wg.Done()
}()
return rio, nil
}
}
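// A note on rio above: Wait first waits for both copy goroutines to finish
// flushing stdout/stderr, then delegates to the underlying cio.IO Wait, so
// the log writers are closed before the task is considered fully stopped.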


@@ -0,0 +1,148 @@
package containerd
import (
"context"
"io/ioutil"
"os"
"sync"
"testing"
"time"
"github.com/docker/docker/libcontainerd"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"gotest.tools/assert"
)
func TestLifeCycle(t *testing.T) {
t.Parallel()
mock := newMockClient()
exec, cleanup := setupTest(t, mock, mock)
defer cleanup()
id := "test-create"
mock.simulateStartError(true, id)
err := exec.Create(id, specs.Spec{}, nil, nil)
assert.Assert(t, err != nil)
mock.simulateStartError(false, id)
err = exec.Create(id, specs.Spec{}, nil, nil)
assert.NilError(t, err)
running, _ := exec.IsRunning(id)
assert.Assert(t, running)
// create with the same ID
err = exec.Create(id, specs.Spec{}, nil, nil)
assert.Assert(t, err != nil)
mock.HandleExitEvent(id) // simulate a plugin that exits
err = exec.Create(id, specs.Spec{}, nil, nil)
assert.NilError(t, err)
}
func setupTest(t *testing.T, client Client, eh ExitHandler) (*Executor, func()) {
rootDir, err := ioutil.TempDir("", "test-daemon")
assert.NilError(t, err)
assert.Assert(t, client != nil)
assert.Assert(t, eh != nil)
return &Executor{
rootDir: rootDir,
client: client,
exitHandler: eh,
}, func() {
assert.NilError(t, os.RemoveAll(rootDir))
}
}
type mockClient struct {
mu sync.Mutex
containers map[string]bool
errorOnStart map[string]bool
}
func newMockClient() *mockClient {
return &mockClient{
containers: make(map[string]bool),
errorOnStart: make(map[string]bool),
}
}
func (c *mockClient) Create(ctx context.Context, id string, _ *specs.Spec, _ interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.containers[id]; ok {
return errors.New("exists")
}
c.containers[id] = false
return nil
}
func (c *mockClient) Restore(ctx context.Context, id string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error) {
return false, 0, nil
}
func (c *mockClient) Status(ctx context.Context, id string) (libcontainerd.Status, error) {
c.mu.Lock()
defer c.mu.Unlock()
running, ok := c.containers[id]
if !ok {
return libcontainerd.StatusUnknown, errors.New("not found")
}
if running {
return libcontainerd.StatusRunning, nil
}
return libcontainerd.StatusStopped, nil
}
func (c *mockClient) Delete(ctx context.Context, id string) error {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.containers, id)
return nil
}
func (c *mockClient) DeleteTask(ctx context.Context, id string) (uint32, time.Time, error) {
return 0, time.Time{}, nil
}
func (c *mockClient) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.containers[id]; !ok {
return 0, errors.New("not found")
}
if c.errorOnStart[id] {
return 0, errors.New("some startup error")
}
c.containers[id] = true
return 1, nil
}
func (c *mockClient) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
return nil
}
func (c *mockClient) simulateStartError(sim bool, id string) {
c.mu.Lock()
defer c.mu.Unlock()
if sim {
c.errorOnStart[id] = sim
return
}
delete(c.errorOnStart, id)
}
func (c *mockClient) HandleExitEvent(id string) error {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.containers, id)
return nil
}


@@ -0,0 +1,384 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strings"
"sync"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/pubsub"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin/v2"
"github.com/docker/docker/registry"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const configFileName = "config.json"
const rootFSFileName = "rootfs"
var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`)
// Executor is the interface the plugin manager uses to start and stop plugins
type Executor interface {
Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error
IsRunning(id string) (bool, error)
Restore(id string, stdout, stderr io.WriteCloser) (alive bool, err error)
Signal(id string, signal int) error
}
func (pm *Manager) restorePlugin(p *v2.Plugin, c *controller) error {
if p.IsEnabled() {
return pm.restore(p, c)
}
return nil
}
type eventLogger func(id, name, action string)
// ManagerConfig defines configuration needed to start new manager.
type ManagerConfig struct {
Store *Store // remove
RegistryService registry.Service
LiveRestoreEnabled bool // TODO: remove
LogPluginEvent eventLogger
Root string
ExecRoot string
CreateExecutor ExecutorCreator
AuthzMiddleware *authorization.Middleware
}
// ExecutorCreator is used in the manager config to pass in an `Executor`
type ExecutorCreator func(*Manager) (Executor, error)
// Manager controls the plugin subsystem.
type Manager struct {
config ManagerConfig
mu sync.RWMutex // protects cMap
muGC sync.RWMutex // protects blobstore deletions
cMap map[*v2.Plugin]*controller
blobStore *basicBlobStore
publisher *pubsub.Publisher
executor Executor
}
// controller represents the manager's control on a plugin.
type controller struct {
restart bool
exitChan chan bool
timeoutInSecs int
}
// pluginRegistryService ensures that all resolved repositories
// are of the plugin class.
type pluginRegistryService struct {
registry.Service
}
func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) {
repoInfo, err = s.Service.ResolveRepository(name)
if repoInfo != nil {
repoInfo.Class = "plugin"
}
return
}
// NewManager returns a new plugin manager.
func NewManager(config ManagerConfig) (*Manager, error) {
if config.RegistryService != nil {
config.RegistryService = pluginRegistryService{config.RegistryService}
}
manager := &Manager{
config: config,
}
for _, dirName := range []string{manager.config.Root, manager.config.ExecRoot, manager.tmpDir()} {
if err := os.MkdirAll(dirName, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", dirName)
}
}
var err error
manager.executor, err = config.CreateExecutor(manager)
if err != nil {
return nil, err
}
manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
if err != nil {
return nil, err
}
manager.cMap = make(map[*v2.Plugin]*controller)
if err := manager.reload(); err != nil {
return nil, errors.Wrap(err, "failed to restore plugins")
}
manager.publisher = pubsub.NewPublisher(0, 0)
return manager, nil
}
func (pm *Manager) tmpDir() string {
return filepath.Join(pm.config.Root, "tmp")
}
// HandleExitEvent is called when the executor receives the exit event
// In the future we may change this, but for now all we care about is the exit event.
func (pm *Manager) HandleExitEvent(id string) error {
p, err := pm.config.Store.GetV2Plugin(id)
if err != nil {
return err
}
if err := os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("id", id).Error("Could not remove plugin bundle dir")
}
pm.mu.RLock()
c := pm.cMap[p]
if c.exitChan != nil {
close(c.exitChan)
c.exitChan = nil // ignore duplicate events (containerd issue #2299)
}
restart := c.restart
pm.mu.RUnlock()
if restart {
pm.enable(p, c, true)
} else {
if err := mount.RecursiveUnmount(filepath.Join(pm.config.Root, id)); err != nil {
return errors.Wrap(err, "error cleaning up plugin mounts")
}
}
return nil
}
func handleLoadError(err error, id string) {
if err == nil {
return
}
logger := logrus.WithError(err).WithField("id", id)
if os.IsNotExist(errors.Cause(err)) {
// Likely some error while removing on an older version of docker
logger.Warn("missing plugin config, skipping: this may be caused due to a failed remove and requires manual cleanup.")
return
}
logger.Error("error loading plugin, skipping")
}
func (pm *Manager) reload() error { // todo: restore
dir, err := ioutil.ReadDir(pm.config.Root)
if err != nil {
return errors.Wrapf(err, "failed to read %v", pm.config.Root)
}
plugins := make(map[string]*v2.Plugin)
for _, v := range dir {
if validFullID.MatchString(v.Name()) {
p, err := pm.loadPlugin(v.Name())
if err != nil {
handleLoadError(err, v.Name())
continue
}
plugins[p.GetID()] = p
} else {
if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) {
// There was likely some error while removing this plugin, let's try to remove again here
if err := system.EnsureRemoveAll(filepath.Join(pm.config.Root, v.Name())); err != nil {
logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin")
}
}
}
}
pm.config.Store.SetAll(plugins)
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, p := range plugins {
c := &controller{exitChan: make(chan bool)}
pm.mu.Lock()
pm.cMap[p] = c
pm.mu.Unlock()
go func(p *v2.Plugin) {
defer wg.Done()
if err := pm.restorePlugin(p, c); err != nil {
logrus.WithError(err).WithField("id", p.GetID()).Error("Failed to restore plugin")
return
}
if p.Rootfs != "" {
p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
}
// We should only enable rootfs propagation for certain plugin types that need it.
for _, typ := range p.PluginObj.Config.Interface.Types {
if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") {
if p.PluginObj.Config.PropagatedMount != "" {
propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
// check if we need to migrate an older propagated mount from before
// these mounts were stored outside the plugin rootfs
if _, err := os.Stat(propRoot); os.IsNotExist(err) {
rootfsProp := filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
if _, err := os.Stat(rootfsProp); err == nil {
if err := os.Rename(rootfsProp, propRoot); err != nil {
logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage")
}
}
}
if err := os.MkdirAll(propRoot, 0755); err != nil {
logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err)
}
}
}
}
pm.save(p)
requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled()
if requiresManualRestore {
// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
if err := pm.enable(p, c, true); err != nil {
logrus.WithError(err).WithField("id", p.GetID()).Error("failed to enable plugin")
}
}
}(p)
}
wg.Wait()
return nil
}
// Get looks up the requested plugin in the store.
func (pm *Manager) Get(idOrName string) (*v2.Plugin, error) {
return pm.config.Store.GetV2Plugin(idOrName)
}
func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) {
p := filepath.Join(pm.config.Root, id, configFileName)
dt, err := ioutil.ReadFile(p)
if err != nil {
return nil, errors.Wrapf(err, "error reading %v", p)
}
var plugin v2.Plugin
if err := json.Unmarshal(dt, &plugin); err != nil {
return nil, errors.Wrapf(err, "error decoding %v", p)
}
return &plugin, nil
}
func (pm *Manager) save(p *v2.Plugin) error {
pluginJSON, err := json.Marshal(p)
if err != nil {
return errors.Wrap(err, "failed to marshal plugin json")
}
if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil {
return errors.Wrap(err, "failed to write atomically plugin json")
}
return nil
}
// GC cleans up unreferenced blobs. It is recommended to run this in a goroutine
func (pm *Manager) GC() {
pm.muGC.Lock()
defer pm.muGC.Unlock()
whitelist := make(map[digest.Digest]struct{})
for _, p := range pm.config.Store.GetAll() {
whitelist[p.Config] = struct{}{}
for _, b := range p.Blobsums {
whitelist[b] = struct{}{}
}
}
pm.blobStore.gc(whitelist)
}
type logHook struct{ id string }
func (logHook) Levels() []logrus.Level {
return logrus.AllLevels
}
func (l logHook) Fire(entry *logrus.Entry) error {
entry.Data = logrus.Fields{"plugin": l.id}
return nil
}
func makeLoggerStreams(id string) (stdout, stderr io.WriteCloser) {
logger := logrus.New()
logger.Hooks.Add(logHook{id})
return logger.WriterLevel(logrus.InfoLevel), logger.WriterLevel(logrus.ErrorLevel)
}
func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error {
if !isEqual(requiredPrivileges, privileges, isEqualPrivilege) {
return errors.New("incorrect privileges")
}
return nil
}
func isEqual(arrOne, arrOther types.PluginPrivileges, compare func(x, y types.PluginPrivilege) bool) bool {
if len(arrOne) != len(arrOther) {
return false
}
sort.Sort(arrOne)
sort.Sort(arrOther)
for i := 0; i < arrOne.Len(); i++ { // start at 0 so the first privilege is compared too
if !compare(arrOne[i], arrOther[i]) {
return false
}
}
return true
}
func isEqualPrivilege(a, b types.PluginPrivilege) bool {
if a.Name != b.Name {
return false
}
return reflect.DeepEqual(a.Value, b.Value)
}
func configToRootFS(c []byte) (*image.RootFS, error) {
var pluginConfig types.PluginConfig
if err := json.Unmarshal(c, &pluginConfig); err != nil {
return nil, err
}
// validation for empty rootfs is in distribution code
if pluginConfig.Rootfs == nil {
return nil, nil
}
return rootFSFromPlugin(pluginConfig.Rootfs), nil
}
func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS {
rootFS := image.RootFS{
Type: pluginfs.Type,
DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)),
}
for i := range pluginfs.DiffIds {
rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i])
}
return &rootFS
}


@@ -0,0 +1,335 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"encoding/json"
"net"
"os"
"path/filepath"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/plugin/v2"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
if p.IsEnabled() && !force {
return errors.Wrap(enabledError(p.Name()), "plugin already enabled")
}
spec, err := p.InitSpec(pm.config.ExecRoot)
if err != nil {
return err
}
c.restart = true
c.exitChan = make(chan bool)
pm.mu.Lock()
pm.cMap[p] = c
pm.mu.Unlock()
var propRoot string
if p.PluginObj.Config.PropagatedMount != "" {
propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
if err := os.MkdirAll(propRoot, 0755); err != nil {
logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err)
}
if err := mount.MakeRShared(propRoot); err != nil {
return errors.Wrap(err, "error setting up propagated mount dir")
}
}
rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName))
if err := initlayer.Setup(rootFS, idtools.Identity{UID: 0, GID: 0}); err != nil {
return errors.WithStack(err)
}
stdout, stderr := makeLoggerStreams(p.GetID())
if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil {
if p.PluginObj.Config.PropagatedMount != "" {
if err := mount.Unmount(propRoot); err != nil {
logrus.Warnf("Could not unmount %s: %v", propRoot, err)
}
}
return errors.WithStack(err)
}
return pm.pluginPostStart(p, c)
}
func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket())
p.SetTimeout(time.Duration(c.timeoutInSecs) * time.Second)
addr := &net.UnixAddr{Net: "unix", Name: sockAddr}
p.SetAddr(addr)
if p.Protocol() == plugins.ProtocolSchemeHTTPV1 {
client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, p.Timeout())
if err != nil {
c.restart = false
shutdownPlugin(p, c.exitChan, pm.executor)
return errors.WithStack(err)
}
p.SetPClient(client)
}
// Initial sleep before net Dial to allow plugin to listen on socket.
time.Sleep(500 * time.Millisecond)
maxRetries := 3
var retries int
for {
// net dial into the unix socket to see if someone's listening.
conn, err := net.Dial("unix", sockAddr)
if err == nil {
conn.Close()
break
}
time.Sleep(3 * time.Second)
retries++
if retries > maxRetries {
logrus.Debugf("error net dialing plugin: %v", err)
c.restart = false
// While restoring plugins, we need to explicitly set the state to disabled
pm.config.Store.SetState(p, false)
shutdownPlugin(p, c.exitChan, pm.executor)
return err
}
}
pm.config.Store.SetState(p, true)
pm.config.Store.CallHandler(p)
return pm.save(p)
}
func (pm *Manager) restore(p *v2.Plugin, c *controller) error {
stdout, stderr := makeLoggerStreams(p.GetID())
alive, err := pm.executor.Restore(p.GetID(), stdout, stderr)
if err != nil {
return err
}
if pm.config.LiveRestoreEnabled {
if !alive {
return pm.enable(p, c, true)
}
c.exitChan = make(chan bool)
c.restart = true
pm.mu.Lock()
pm.cMap[p] = c
pm.mu.Unlock()
return pm.pluginPostStart(p, c)
}
if alive {
// TODO(@cpuguy83): Should we always just re-attach to the running plugin instead of doing this?
c.restart = false
shutdownPlugin(p, c.exitChan, pm.executor)
}
return nil
}
func shutdownPlugin(p *v2.Plugin, ec chan bool, executor Executor) {
pluginID := p.GetID()
err := executor.Signal(pluginID, int(unix.SIGTERM))
if err != nil {
logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
} else {
select {
case <-ec:
logrus.Debug("Clean shutdown of plugin")
case <-time.After(time.Second * 10):
logrus.Debug("Force shutdown plugin")
if err := executor.Signal(pluginID, int(unix.SIGKILL)); err != nil {
logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
}
select {
case <-ec:
logrus.Debug("SIGKILL plugin shutdown")
case <-time.After(time.Second * 10):
logrus.Debug("Force shutdown plugin FAILED")
}
}
}
}
func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
if !p.IsEnabled() {
return errors.Wrap(errDisabled(p.Name()), "plugin is already disabled")
}
c.restart = false
shutdownPlugin(p, c.exitChan, pm.executor)
pm.config.Store.SetState(p, false)
return pm.save(p)
}
// Shutdown stops all plugins and is called during daemon shutdown.
func (pm *Manager) Shutdown() {
plugins := pm.config.Store.GetAll()
for _, p := range plugins {
pm.mu.RLock()
c := pm.cMap[p]
pm.mu.RUnlock()
if pm.config.LiveRestoreEnabled && p.IsEnabled() {
logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
continue
}
if pm.executor != nil && p.IsEnabled() {
c.restart = false
shutdownPlugin(p, c.exitChan, pm.executor)
}
}
if err := mount.RecursiveUnmount(pm.config.Root); err != nil {
logrus.WithError(err).Warn("error cleaning up plugin mounts")
}
}
func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) {
config, err := pm.setupNewPlugin(configDigest, blobsums, privileges)
if err != nil {
return err
}
pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
orig := filepath.Join(pdir, "rootfs")
// Make sure nothing is mounted
// This could happen if the plugin was disabled with `-f` with active mounts.
// If anything in `orig` is still mounted, this will error out.
if err := mount.RecursiveUnmount(orig); err != nil {
return errdefs.System(err)
}
backup := orig + "-old"
if err := os.Rename(orig, backup); err != nil {
return errors.Wrap(errdefs.System(err), "error backing up plugin data before upgrade")
}
defer func() {
if err != nil {
if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) {
logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade")
return
}
if mvErr := os.Rename(backup, orig); mvErr != nil {
err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure")
}
if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) {
logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir)
}
} else {
if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) {
logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade")
}
p.Config = configDigest
p.Blobsums = blobsums
}
}()
if err := os.Rename(tmpRootFSDir, orig); err != nil {
return errors.Wrap(errdefs.System(err), "error upgrading")
}
p.PluginObj.Config = config
err = pm.save(p)
return errors.Wrap(err, "error saving upgraded plugin config")
}
func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) {
configRC, err := pm.blobStore.Get(configDigest)
if err != nil {
return types.PluginConfig{}, err
}
defer configRC.Close()
var config types.PluginConfig
dec := json.NewDecoder(configRC)
if err := dec.Decode(&config); err != nil {
return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config")
}
if dec.More() {
return types.PluginConfig{}, errors.New("invalid config json")
}
requiredPrivileges := computePrivileges(config)
if privileges != nil {
if err := validatePrivileges(requiredPrivileges, *privileges); err != nil {
return types.PluginConfig{}, err
}
}
return config, nil
}
// createPlugin creates a new plugin. Take the lock before calling.
func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges, opts ...CreateOpt) (p *v2.Plugin, err error) {
if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store
return nil, errdefs.InvalidParameter(err)
}
config, err := pm.setupNewPlugin(configDigest, blobsums, privileges)
if err != nil {
return nil, err
}
p = &v2.Plugin{
PluginObj: types.Plugin{
Name: name,
ID: stringid.GenerateRandomID(),
Config: config,
},
Config: configDigest,
Blobsums: blobsums,
}
p.InitEmptySettings()
for _, o := range opts {
o(p)
}
pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
if err := os.MkdirAll(pdir, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", pdir)
}
defer func() {
if err != nil {
os.RemoveAll(pdir)
}
}()
if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil {
return nil, errors.Wrap(err, "failed to rename rootfs")
}
if err := pm.save(p); err != nil {
return nil, err
}
pm.config.Store.Add(p) // todo: remove
return p, nil
}


@ -0,0 +1,279 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin/v2"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"gotest.tools/skip"
)
func TestManagerWithPluginMounts(t *testing.T) {
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
root, err := ioutil.TempDir("", "test-store-with-plugin-mounts")
if err != nil {
t.Fatal(err)
}
defer system.EnsureRemoveAll(root)
s := NewStore()
managerRoot := filepath.Join(root, "manager")
p1 := newTestPlugin(t, "test1", "testcap", managerRoot)
p2 := newTestPlugin(t, "test2", "testcap", managerRoot)
p2.PluginObj.Enabled = true
m, err := NewManager(
ManagerConfig{
Store: s,
Root: managerRoot,
ExecRoot: filepath.Join(root, "exec"),
CreateExecutor: func(*Manager) (Executor, error) { return nil, nil },
LogPluginEvent: func(_, _, _ string) {},
})
if err != nil {
t.Fatal(err)
}
if err := s.Add(p1); err != nil {
t.Fatal(err)
}
if err := s.Add(p2); err != nil {
t.Fatal(err)
}
// Create a mount to simulate a plugin that has created its own mounts
p2Mount := filepath.Join(p2.Rootfs, "testmount")
if err := os.MkdirAll(p2Mount, 0755); err != nil {
t.Fatal(err)
}
if err := mount.Mount("tmpfs", p2Mount, "tmpfs", ""); err != nil {
t.Fatal(err)
}
if err := m.Remove(p1.GetID(), &types.PluginRmConfig{ForceRemove: true}); err != nil {
t.Fatal(err)
}
if mounted, err := mount.Mounted(p2Mount); !mounted || err != nil {
t.Fatalf("expected %s to be mounted, err: %v", p2Mount, err)
}
}
func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
id := stringid.GenerateNonCryptoID()
rootfs := filepath.Join(root, id)
if err := os.MkdirAll(rootfs, 0755); err != nil {
t.Fatal(err)
}
p := v2.Plugin{PluginObj: types.Plugin{ID: id, Name: name}}
p.Rootfs = rootfs
iType := types.PluginInterfaceType{Capability: cap, Prefix: "docker", Version: "1.0"}
i := types.PluginConfigInterface{Socket: "plugin.sock", Types: []types.PluginInterfaceType{iType}}
p.PluginObj.Config.Interface = i
p.PluginObj.ID = id
return &p
}
type simpleExecutor struct {
}
func (e *simpleExecutor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
return errors.New("Create failed")
}
func (e *simpleExecutor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) {
return false, nil
}
func (e *simpleExecutor) IsRunning(id string) (bool, error) {
return false, nil
}
func (e *simpleExecutor) Signal(id string, signal int) error {
return nil
}
func TestCreateFailed(t *testing.T) {
root, err := ioutil.TempDir("", "test-create-failed")
if err != nil {
t.Fatal(err)
}
defer system.EnsureRemoveAll(root)
s := NewStore()
managerRoot := filepath.Join(root, "manager")
p := newTestPlugin(t, "create", "testcreate", managerRoot)
m, err := NewManager(
ManagerConfig{
Store: s,
Root: managerRoot,
ExecRoot: filepath.Join(root, "exec"),
CreateExecutor: func(*Manager) (Executor, error) { return &simpleExecutor{}, nil },
LogPluginEvent: func(_, _, _ string) {},
})
if err != nil {
t.Fatal(err)
}
if err := s.Add(p); err != nil {
t.Fatal(err)
}
if err := m.enable(p, &controller{}, false); err == nil {
t.Fatalf("expected Create failed error, got %v", err)
}
if err := m.Remove(p.GetID(), &types.PluginRmConfig{ForceRemove: true}); err != nil {
t.Fatal(err)
}
}
type executorWithRunning struct {
m *Manager
root string
exitChans map[string]chan struct{}
}
func (e *executorWithRunning) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
sockAddr := filepath.Join(e.root, id, "plugin.sock")
ch := make(chan struct{})
if e.exitChans == nil {
e.exitChans = make(map[string]chan struct{})
}
e.exitChans[id] = ch
if _, err := listenTestPlugin(sockAddr, ch); err != nil {
return err
}
return nil
}
func (e *executorWithRunning) IsRunning(id string) (bool, error) {
return true, nil
}
func (e *executorWithRunning) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) {
return true, nil
}
func (e *executorWithRunning) Signal(id string, signal int) error {
ch := e.exitChans[id]
ch <- struct{}{}
<-ch
e.m.HandleExitEvent(id)
return nil
}
func TestPluginAlreadyRunningOnStartup(t *testing.T) {
t.Parallel()
root, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatal(err)
}
defer system.EnsureRemoveAll(root)
for _, test := range []struct {
desc string
config ManagerConfig
}{
{
desc: "live-restore-disabled",
config: ManagerConfig{
LogPluginEvent: func(_, _, _ string) {},
},
},
{
desc: "live-restore-enabled",
config: ManagerConfig{
LogPluginEvent: func(_, _, _ string) {},
LiveRestoreEnabled: true,
},
},
} {
t.Run(test.desc, func(t *testing.T) {
config := test.config
desc := test.desc
t.Parallel()
p := newTestPlugin(t, desc, desc, config.Root)
p.PluginObj.Enabled = true
// Need a short-ish path here so we don't run into unix socket path length issues.
config.ExecRoot, err = ioutil.TempDir("", "plugintest")
if err != nil {
t.Fatal(err)
}
executor := &executorWithRunning{root: config.ExecRoot}
config.CreateExecutor = func(m *Manager) (Executor, error) { executor.m = m; return executor, nil }
if err := executor.Create(p.GetID(), specs.Spec{}, nil, nil); err != nil {
t.Fatal(err)
}
root := filepath.Join(root, desc)
config.Root = filepath.Join(root, "manager")
if err := os.MkdirAll(filepath.Join(config.Root, p.GetID()), 0755); err != nil {
t.Fatal(err)
}
if !p.IsEnabled() {
t.Fatal("plugin should be enabled")
}
if err := (&Manager{config: config}).save(p); err != nil {
t.Fatal(err)
}
s := NewStore()
config.Store = s
defer system.EnsureRemoveAll(config.ExecRoot)
m, err := NewManager(config)
if err != nil {
t.Fatal(err)
}
defer m.Shutdown()
p = s.GetAll()[p.GetID()] // refresh `p` with what the manager knows
if p.Client() == nil {
t.Fatal("plugin client should not be nil")
}
})
}
}
func listenTestPlugin(sockAddr string, exit chan struct{}) (net.Listener, error) {
if err := os.MkdirAll(filepath.Dir(sockAddr), 0755); err != nil {
return nil, err
}
l, err := net.Listen("unix", sockAddr)
if err != nil {
return nil, err
}
go func() {
for {
conn, err := l.Accept()
if err != nil {
return
}
conn.Close()
}
}()
go func() {
<-exit
l.Close()
os.Remove(sockAddr)
exit <- struct{}{}
}()
return l, nil
}


@ -0,0 +1,55 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"testing"
"github.com/docker/docker/api/types"
)
func TestValidatePrivileges(t *testing.T) {
testData := map[string]struct {
requiredPrivileges types.PluginPrivileges
privileges types.PluginPrivileges
result bool
}{
"diff-len": {
requiredPrivileges: []types.PluginPrivilege{
{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
},
privileges: []types.PluginPrivilege{
{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
},
result: false,
},
"diff-value": {
requiredPrivileges: []types.PluginPrivilege{
{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}},
{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "***"}},
},
privileges: []types.PluginPrivilege{
{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
},
result: false,
},
"diff-order-but-same-value": {
requiredPrivileges: []types.PluginPrivilege{
{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}},
{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
},
privileges: []types.PluginPrivilege{
{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
{Name: "Privilege1", Description: "Description", Value: []string{"GHI", "abc", "def"}},
},
result: true,
},
}
for key, data := range testData {
err := validatePrivileges(data.requiredPrivileges, data.privileges)
if (err == nil) != data.result {
t.Fatalf("Test item %s expected result to be %t, got %t", key, data.result, (err == nil))
}
}
}


@ -0,0 +1,28 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"fmt"
"github.com/docker/docker/plugin/v2"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
return fmt.Errorf("Not implemented")
}
func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) {
return nil, fmt.Errorf("Not implemented")
}
func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
return fmt.Errorf("Not implemented")
}
func (pm *Manager) restore(p *v2.Plugin, c *controller) error {
return fmt.Errorf("Not implemented")
}
// Shutdown plugins
func (pm *Manager) Shutdown() {
}


@ -0,0 +1,291 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"fmt"
"strings"
"github.com/docker/distribution/reference"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/plugin/v2"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
/* allowV1PluginsFallback determines daemon's support for V1 plugins.
* When the time comes to remove support for V1 plugins, flipping
* this bool is all that will be needed.
*/
const allowV1PluginsFallback = true
/* defaultAPIVersion is the version of the plugin API for volume, network,
IPAM and authz. This is a very stable API. When we update this API, then
pluginType should include a version. e.g. "networkdriver/2.0".
*/
const defaultAPIVersion = "1.0"
// GetV2Plugin retrieves a plugin by name, id or partial ID.
func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) {
ps.RLock()
defer ps.RUnlock()
id, err := ps.resolvePluginID(refOrID)
if err != nil {
return nil, err
}
p, idOk := ps.plugins[id]
if !idOk {
return nil, errors.WithStack(errNotFound(id))
}
return p, nil
}
// validateName returns error if name is already reserved. always call with lock and full name
func (ps *Store) validateName(name string) error {
for _, p := range ps.plugins {
if p.Name() == name {
return alreadyExistsError(name)
}
}
return nil
}
// GetAll retrieves all plugins.
func (ps *Store) GetAll() map[string]*v2.Plugin {
ps.RLock()
defer ps.RUnlock()
return ps.plugins
}
// SetAll initializes plugins during daemon restore.
func (ps *Store) SetAll(plugins map[string]*v2.Plugin) {
ps.Lock()
defer ps.Unlock()
for _, p := range plugins {
ps.setSpecOpts(p)
}
ps.plugins = plugins
}
func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin {
ps.RLock()
defer ps.RUnlock()
result := make([]plugingetter.CompatPlugin, 0, 1)
for _, p := range ps.plugins {
if p.IsEnabled() {
if _, err := p.FilterByCap(capability); err == nil {
result = append(result, p)
}
}
}
return result
}
// SetState sets the active state of the plugin and updates plugindb.
func (ps *Store) SetState(p *v2.Plugin, state bool) {
ps.Lock()
defer ps.Unlock()
p.PluginObj.Enabled = state
}
func (ps *Store) setSpecOpts(p *v2.Plugin) {
var specOpts []SpecOpt
for _, typ := range p.GetTypes() {
opts, ok := ps.specOpts[typ.String()]
if ok {
specOpts = append(specOpts, opts...)
}
}
p.SetSpecOptModifier(func(s *specs.Spec) {
for _, o := range specOpts {
o(s)
}
})
}
// Add adds a plugin to memory and plugindb.
// An error will be returned if there is a collision.
func (ps *Store) Add(p *v2.Plugin) error {
ps.Lock()
defer ps.Unlock()
if v, exist := ps.plugins[p.GetID()]; exist {
return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name())
}
ps.setSpecOpts(p)
ps.plugins[p.GetID()] = p
return nil
}
// Remove removes a plugin from memory and plugindb.
func (ps *Store) Remove(p *v2.Plugin) {
ps.Lock()
delete(ps.plugins, p.GetID())
ps.Unlock()
}
// Get returns an enabled plugin matching the given name and capability.
func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) {
// Lookup using new model.
if ps != nil {
p, err := ps.GetV2Plugin(name)
if err == nil {
if p.IsEnabled() {
fp, err := p.FilterByCap(capability)
if err != nil {
return nil, err
}
p.AddRefCount(mode)
return fp, nil
}
// Plugin was found but it is disabled, so we should not fall back to legacy plugins
// but we should error out right away
return nil, errDisabled(name)
}
if _, ok := errors.Cause(err).(errNotFound); !ok {
return nil, err
}
}
if !allowV1PluginsFallback {
return nil, errNotFound(name)
}
p, err := plugins.Get(name, capability)
if err == nil {
return p, nil
}
if errors.Cause(err) == plugins.ErrNotFound {
return nil, errNotFound(name)
}
return nil, errors.Wrap(errdefs.System(err), "legacy plugin")
}
// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability.
func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin {
return ps.getAllByCap(capability)
}
// GetAllByCap returns a list of enabled plugins matching the given capability.
func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) {
result := make([]plugingetter.CompatPlugin, 0, 1)
/* Daemon start always calls plugin.Init thereby initializing a store.
* So store on experimental builds can never be nil, even while
* handling legacy plugins. However, there are legacy plugin unit
* tests where the volume subsystem directly talks with the plugin,
* bypassing the daemon. For such tests, this check is necessary.
*/
if ps != nil {
ps.RLock()
result = ps.getAllByCap(capability)
ps.RUnlock()
}
// Lookup with legacy model
if allowV1PluginsFallback {
pl, err := plugins.GetAll(capability)
if err != nil {
return nil, errors.Wrap(errdefs.System(err), "legacy plugin")
}
for _, p := range pl {
result = append(result, p)
}
}
return result, nil
}
func pluginType(cap string) string {
return fmt.Sprintf("docker.%s/%s", strings.ToLower(cap), defaultAPIVersion)
}
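// For example (illustrative): pluginType("NetworkDriver") yields
// "docker.networkdriver/1.0", the key used for handler and spec-opt
// registration below:
//
//	pluginType("NetworkDriver") == "docker.networkdriver/1.0"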
// Handle sets a callback for a given capability. It is only used by network
// and ipam drivers during plugin registration. The callback registers the
// driver with the subsystem (network, ipam).
func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) {
typ := pluginType(capability)
// Register callback with new plugin model.
ps.Lock()
handlers, ok := ps.handlers[typ]
if !ok {
handlers = []func(string, *plugins.Client){}
}
handlers = append(handlers, callback)
ps.handlers[typ] = handlers
ps.Unlock()
// Register callback with legacy plugin model.
if allowV1PluginsFallback {
plugins.Handle(capability, callback)
}
}
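// A hypothetical registration, sketched for illustration: a subsystem such
// as the volume service could register itself so that enabled volume plugins
// are wired up as drivers when CallHandler runs:
//
//	store.Handle("volumedriver", func(name string, c *plugins.Client) {
//		// register `name` with the volume driver registry
//	})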
// RegisterRuntimeOpt stores a list of SpecOpts for the provided capability.
// These options are applied to the runtime spec before a plugin is started for the specified capability.
func (ps *Store) RegisterRuntimeOpt(cap string, opts ...SpecOpt) {
ps.Lock()
defer ps.Unlock()
typ := pluginType(cap)
ps.specOpts[typ] = append(ps.specOpts[typ], opts...)
}
// CallHandler calls the registered callback. It is invoked during plugin enable.
func (ps *Store) CallHandler(p *v2.Plugin) {
for _, typ := range p.GetTypes() {
for _, handler := range ps.handlers[typ.String()] {
handler(p.Name(), p.Client())
}
}
}
func (ps *Store) resolvePluginID(idOrName string) (string, error) {
ps.RLock() // todo: fix
defer ps.RUnlock()
if validFullID.MatchString(idOrName) {
return idOrName, nil
}
ref, err := reference.ParseNormalizedNamed(idOrName)
if err != nil {
return "", errors.WithStack(errNotFound(idOrName))
}
if _, ok := ref.(reference.Canonical); ok {
logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref))
return "", errors.WithStack(errNotFound(idOrName))
}
ref = reference.TagNameOnly(ref)
for _, p := range ps.plugins {
if p.PluginObj.Name == reference.FamiliarString(ref) {
return p.PluginObj.ID, nil
}
}
var found *v2.Plugin
for id, p := range ps.plugins { // this can be optimized
if strings.HasPrefix(id, idOrName) {
if found != nil {
return "", errors.WithStack(errAmbiguous(idOrName))
}
found = p
}
}
if found == nil {
return "", errors.WithStack(errNotFound(idOrName))
}
return found.PluginObj.ID, nil
}
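// Resolution order, sketched with hypothetical inputs (assuming validFullID
// matches a full 64-character hex ID): a full ID is used as-is; otherwise the
// input is normalized as a reference ("myplugin" becomes "myplugin:latest")
// and matched against plugin names; failing that, it is treated as an ID
// prefix and must match exactly one plugin, else errAmbiguous is returned.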


@ -0,0 +1,64 @@
package plugin // import "github.com/docker/docker/plugin"
import (
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/plugin/v2"
)
func TestFilterByCapNeg(t *testing.T) {
p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"}
i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
p.PluginObj.Config.Interface = i
_, err := p.FilterByCap("foobar")
if err == nil {
t.Fatalf("expected inadequate error, got %v", err)
}
}
func TestFilterByCapPos(t *testing.T) {
p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"}
i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
p.PluginObj.Config.Interface = i
_, err := p.FilterByCap("volumedriver")
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
}
func TestStoreGetPluginNotMatchCapRefs(t *testing.T) {
s := NewStore()
p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
iType := types.PluginInterfaceType{Capability: "whatever", Prefix: "docker", Version: "1.0"}
i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
p.PluginObj.Config.Interface = i
if err := s.Add(&p); err != nil {
t.Fatal(err)
}
if _, err := s.Get("test", "volumedriver", plugingetter.Acquire); err == nil {
t.Fatal("exepcted error when getting plugin that doesn't match the passed in capability")
}
if refs := p.GetRefCount(); refs != 0 {
t.Fatalf("reference count should be 0, got: %d", refs)
}
p.PluginObj.Enabled = true
if _, err := s.Get("test", "volumedriver", plugingetter.Acquire); err == nil {
t.Fatal("exepcted error when getting plugin that doesn't match the passed in capability")
}
if refs := p.GetRefCount(); refs != 0 {
t.Fatalf("reference count should be 0, got: %d", refs)
}
}


@ -0,0 +1,311 @@
package v2 // import "github.com/docker/docker/plugin/v2"
import (
"fmt"
"net"
"path/filepath"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/plugins"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
)
// Plugin represents an individual plugin.
type Plugin struct {
mu sync.RWMutex
PluginObj types.Plugin `json:"plugin"` // todo: embed struct
pClient *plugins.Client
refCount int
Rootfs string // TODO: make private
Config digest.Digest
Blobsums []digest.Digest
modifyRuntimeSpec func(*specs.Spec)
SwarmServiceID string
timeout time.Duration
addr net.Addr
}
const defaultPluginRuntimeDestination = "/run/docker/plugins"
// ErrInadequateCapability indicates that the plugin did not have the requested capability.
type ErrInadequateCapability struct {
cap string
}
func (e ErrInadequateCapability) Error() string {
return fmt.Sprintf("plugin does not provide %q capability", e.cap)
}
// ScopedPath returns the path scoped to the plugin rootfs
func (p *Plugin) ScopedPath(s string) string {
if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) {
// re-scope to the propagated mount path on the host
return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount))
}
return filepath.Join(p.Rootfs, s)
}
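// For example (hypothetical paths): with Rootfs
// "/var/lib/docker/plugins/<id>/rootfs" and PropagatedMount "/data",
//
//	p.ScopedPath("/data/db")  // -> /var/lib/docker/plugins/<id>/propagated-mount/db
//	p.ScopedPath("/etc/conf") // -> /var/lib/docker/plugins/<id>/rootfs/etc/conf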
// Client returns the plugin client.
// Deprecated: use p.Addr() and manually create the client
func (p *Plugin) Client() *plugins.Client {
p.mu.RLock()
defer p.mu.RUnlock()
return p.pClient
}
// SetPClient sets the plugin client.
// Deprecated: Hardcoded plugin client is deprecated
func (p *Plugin) SetPClient(client *plugins.Client) {
p.mu.Lock()
defer p.mu.Unlock()
p.pClient = client
}
// IsV1 returns true for V1 plugins and false otherwise.
func (p *Plugin) IsV1() bool {
return false
}
// Name returns the plugin name.
func (p *Plugin) Name() string {
return p.PluginObj.Name
}
// FilterByCap queries the plugin for a given capability.
func (p *Plugin) FilterByCap(capability string) (*Plugin, error) {
capability = strings.ToLower(capability)
for _, typ := range p.PluginObj.Config.Interface.Types {
if typ.Capability == capability && typ.Prefix == "docker" {
return p, nil
}
}
return nil, ErrInadequateCapability{capability}
}
// InitEmptySettings initializes empty settings for a plugin.
func (p *Plugin) InitEmptySettings() {
p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices)
p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env))
for _, env := range p.PluginObj.Config.Env {
if env.Value != nil {
p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value))
}
}
p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))
copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)
}
// Set is used to pass arguments to the plugin.
func (p *Plugin) Set(args []string) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.PluginObj.Enabled {
return fmt.Errorf("cannot set on an active plugin, disable plugin before setting")
}
sets, err := newSettables(args)
if err != nil {
return err
}
// TODO(vieux): lots of code duplication here, needs to be refactored.
next:
for _, s := range sets {
// range over all the envs in the config
for _, env := range p.PluginObj.Config.Env {
// found the env in the config
if env.Name == s.name {
// is it settable?
if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil {
return err
} else if !ok {
return fmt.Errorf("%q is not settable", s.prettyName())
}
// it is, so let's update the settings in memory
updateSettingsEnv(&p.PluginObj.Settings.Env, &s)
continue next
}
}
// range over all the mounts in the config
for _, mount := range p.PluginObj.Config.Mounts {
// found the mount in the config
if mount.Name == s.name {
// is it settable?
if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil {
return err
} else if !ok {
return fmt.Errorf("%q is not settable", s.prettyName())
}
// it is, so let's update the settings in memory
if mount.Source == nil {
return fmt.Errorf("Plugin config has no mount source")
}
*mount.Source = s.value
continue next
}
}
// range over all the devices in the config
for _, device := range p.PluginObj.Config.Linux.Devices {
// found the device in the config
if device.Name == s.name {
// is it settable?
if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil {
return err
} else if !ok {
return fmt.Errorf("%q is not settable", s.prettyName())
}
// it is, so let's update the settings in memory
if device.Path == nil {
return fmt.Errorf("Plugin config has no device path")
}
*device.Path = s.value
continue next
}
}
// found the name in the config
if p.PluginObj.Config.Args.Name == s.name {
// is it settable?
if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil {
return err
} else if !ok {
return fmt.Errorf("%q is not settable", s.prettyName())
}
// it is, so let's update the settings in memory
p.PluginObj.Settings.Args = strings.Split(s.value, " ")
continue next
}
return fmt.Errorf("setting %q not found in the plugin configuration", s.name)
}
return nil
}
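// A hypothetical invocation, for illustration: given a config that declares
// a settable env var DEBUG and a settable mount named "state", both can be
// changed while the plugin is disabled:
//
//	err := p.Set([]string{"DEBUG=1", "state.source=/mnt/state"})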
// IsEnabled returns the active state of the plugin.
func (p *Plugin) IsEnabled() bool {
p.mu.RLock()
defer p.mu.RUnlock()
return p.PluginObj.Enabled
}
// GetID returns the plugin's ID.
func (p *Plugin) GetID() string {
p.mu.RLock()
defer p.mu.RUnlock()
return p.PluginObj.ID
}
// GetSocket returns the plugin socket.
func (p *Plugin) GetSocket() string {
p.mu.RLock()
defer p.mu.RUnlock()
return p.PluginObj.Config.Interface.Socket
}
// GetTypes returns the interface types of a plugin.
func (p *Plugin) GetTypes() []types.PluginInterfaceType {
p.mu.RLock()
defer p.mu.RUnlock()
return p.PluginObj.Config.Interface.Types
}
// GetRefCount returns the reference count.
func (p *Plugin) GetRefCount() int {
p.mu.RLock()
defer p.mu.RUnlock()
return p.refCount
}
// AddRefCount adds to reference count.
func (p *Plugin) AddRefCount(count int) {
p.mu.Lock()
defer p.mu.Unlock()
p.refCount += count
}
// Acquire increments the plugin's reference count
// This should be followed up by `Release()` when the plugin is no longer in use.
func (p *Plugin) Acquire() {
p.AddRefCount(plugingetter.Acquire)
}
// Release decrements the plugin's reference count
// This should only be called when the plugin is no longer in use, e.g. after
// a matching `Acquire()` or getter.Get("name", "type", plugingetter.Acquire)
func (p *Plugin) Release() {
p.AddRefCount(plugingetter.Release)
}
// SetSpecOptModifier sets the function to use to modify the generated
// runtime spec.
func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) {
p.mu.Lock()
p.modifyRuntimeSpec = f
p.mu.Unlock()
}
// Timeout gets the currently configured connection timeout.
// This should be used when dialing the plugin.
func (p *Plugin) Timeout() time.Duration {
p.mu.RLock()
t := p.timeout
p.mu.RUnlock()
return t
}
// SetTimeout sets the timeout to use for dialing.
func (p *Plugin) SetTimeout(t time.Duration) {
p.mu.Lock()
p.timeout = t
p.mu.Unlock()
}
// Addr returns the net.Addr to use to connect to the plugin socket
func (p *Plugin) Addr() net.Addr {
p.mu.RLock()
addr := p.addr
p.mu.RUnlock()
return addr
}
// SetAddr sets the plugin address which can be used for dialing the plugin.
func (p *Plugin) SetAddr(addr net.Addr) {
p.mu.Lock()
p.addr = addr
p.mu.Unlock()
}
// Protocol is the protocol that should be used for interacting with the plugin.
func (p *Plugin) Protocol() string {
if p.PluginObj.Config.Interface.ProtocolScheme != "" {
return p.PluginObj.Config.Interface.ProtocolScheme
}
return plugins.ProtocolSchemeHTTPV1
}


@ -0,0 +1,141 @@
package v2 // import "github.com/docker/docker/plugin/v2"
import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// InitSpec creates an OCI spec from the plugin's config.
func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
s := oci.DefaultSpec()
s.Root = &specs.Root{
Path: p.Rootfs,
Readonly: false, // TODO: all plugins should be readonly? settable in config?
}
userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
for _, m := range p.PluginObj.Settings.Mounts {
userMounts[m.Destination] = struct{}{}
}
execRoot = filepath.Join(execRoot, p.PluginObj.ID)
if err := os.MkdirAll(execRoot, 0700); err != nil {
return nil, errors.WithStack(err)
}
if p.PluginObj.Config.PropagatedMount != "" {
pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
s.Mounts = append(s.Mounts, specs.Mount{
Source: pRoot,
Destination: p.PluginObj.Config.PropagatedMount,
Type: "bind",
Options: []string{"rbind", "rw", "rshared"},
})
s.Linux.RootfsPropagation = "rshared"
}
mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
Source: &execRoot,
Destination: defaultPluginRuntimeDestination,
Type: "bind",
Options: []string{"rbind", "rshared"},
})
if p.PluginObj.Config.Network.Type != "" {
// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
if p.PluginObj.Config.Network.Type == "host" {
oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network"))
}
etcHosts := "/etc/hosts"
resolvConf := "/etc/resolv.conf"
mounts = append(mounts,
types.PluginMount{
Source: &etcHosts,
Destination: etcHosts,
Type: "bind",
Options: []string{"rbind", "ro"},
},
types.PluginMount{
Source: &resolvConf,
Destination: resolvConf,
Type: "bind",
Options: []string{"rbind", "ro"},
})
}
if p.PluginObj.Config.PidHost {
oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid"))
}
if p.PluginObj.Config.IpcHost {
oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc"))
}
// Remove any default /dev/* mounts from the generated spec that the plugin's
// own mounts (appended below) override; at this point s.Mounts contains only
// the defaults plus the propagated mount.
var kept []specs.Mount
for _, m := range s.Mounts {
if strings.HasPrefix(m.Destination, "/dev/") {
if _, ok := userMounts[m.Destination]; ok {
continue
}
}
kept = append(kept, m)
}
s.Mounts = kept
for _, mnt := range mounts {
m := specs.Mount{
Destination: mnt.Destination,
Type: mnt.Type,
Options: mnt.Options,
}
if mnt.Source == nil {
return nil, errors.New("mount source is not specified")
}
m.Source = *mnt.Source
s.Mounts = append(s.Mounts, m)
}
if p.PluginObj.Config.Linux.AllowAllDevices {
s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}}
}
for _, dev := range p.PluginObj.Settings.Devices {
path := *dev.Path
d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
if err != nil {
return nil, errors.WithStack(err)
}
s.Linux.Devices = append(s.Linux.Devices, d...)
s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
}
envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS)
envs = append(envs, p.PluginObj.Settings.Env...)
args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
cwd := p.PluginObj.Config.WorkDir
if len(cwd) == 0 {
cwd = "/"
}
s.Process.Terminal = false
s.Process.Args = args
s.Process.Cwd = cwd
s.Process.Env = envs
caps := s.Process.Capabilities
caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...)
caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...)
caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...)
caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...)
if p.modifyRuntimeSpec != nil {
p.modifyRuntimeSpec(&s)
}
return &s, nil
}


@ -0,0 +1,14 @@
// +build !linux

package v2 // import "github.com/docker/docker/plugin/v2"
import (
"errors"
"github.com/opencontainers/runtime-spec/specs-go"
)
// InitSpec creates an OCI spec from the plugin's config.
func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
return nil, errors.New("not supported")
}


@ -0,0 +1,102 @@
package v2 // import "github.com/docker/docker/plugin/v2"
import (
"errors"
"fmt"
"strings"
)
type settable struct {
name string
field string
value string
}
var (
allowedSettableFieldsEnv = []string{"value"}
allowedSettableFieldsArgs = []string{"value"}
allowedSettableFieldsDevices = []string{"path"}
allowedSettableFieldsMounts = []string{"source"}
errMultipleFields = errors.New("multiple fields are settable, one must be specified")
errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]")
)
func newSettables(args []string) ([]settable, error) {
sets := make([]settable, 0, len(args))
for _, arg := range args {
set, err := newSettable(arg)
if err != nil {
return nil, err
}
sets = append(sets, set)
}
return sets, nil
}
func newSettable(arg string) (settable, error) {
var set settable
if i := strings.Index(arg, "="); i == 0 {
return set, errInvalidFormat
} else if i < 0 {
set.name = arg
} else {
set.name = arg[:i]
set.value = arg[i+1:]
}
if i := strings.LastIndex(set.name, "."); i > 0 {
set.field = set.name[i+1:]
set.name = arg[:i]
}
return set, nil
}
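// Parsing examples (illustrative, with hypothetical inputs):
//
//	newSettable("DEBUG=1")           // settable{name: "DEBUG", value: "1"}
//	newSettable("state.source=/mnt") // settable{name: "state", field: "source", value: "/mnt"}
//	newSettable("=1")                // errInvalidFormat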
// prettyName return name.field if there is a field, otherwise name.
func (set *settable) prettyName() string {
if set.field != "" {
return fmt.Sprintf("%s.%s", set.name, set.field)
}
return set.name
}
func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) {
if set.field == "" {
if len(settable) == 1 {
// if field is not specified and there only one settable, default to it.
set.field = settable[0]
} else if len(settable) > 1 {
return false, errMultipleFields
}
}
isAllowed := false
for _, allowedSettableField := range allowedSettableFields {
if set.field == allowedSettableField {
isAllowed = true
break
}
}
if isAllowed {
for _, settableField := range settable {
if set.field == settableField {
return true, nil
}
}
}
return false, nil
}
func updateSettingsEnv(env *[]string, set *settable) {
for i, e := range *env {
if parts := strings.SplitN(e, "=", 2); parts[0] == set.name {
(*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value)
return
}
}
*env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value))
}


@ -0,0 +1,91 @@
package v2 // import "github.com/docker/docker/plugin/v2"
import (
"reflect"
"testing"
)
func TestNewSettable(t *testing.T) {
contexts := []struct {
arg string
name string
field string
value string
err error
}{
{"name=value", "name", "", "value", nil},
{"name", "name", "", "", nil},
{"name.field=value", "name", "field", "value", nil},
{"name.field", "name", "field", "", nil},
{"=value", "", "", "", errInvalidFormat},
{"=", "", "", "", errInvalidFormat},
}
for _, c := range contexts {
s, err := newSettable(c.arg)
if err != c.err {
t.Fatalf("expected error to be %v, got %v", c.err, err)
}
if s.name != c.name {
t.Fatalf("expected name to be %q, got %q", c.name, s.name)
}
if s.field != c.field {
t.Fatalf("expected field to be %q, got %q", c.field, s.field)
}
if s.value != c.value {
t.Fatalf("expected value to be %q, got %q", c.value, s.value)
}
}
}
func TestIsSettable(t *testing.T) {
contexts := []struct {
allowedSettableFields []string
set settable
settable []string
result bool
err error
}{
{allowedSettableFieldsEnv, settable{}, []string{}, false, nil},
{allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil},
{allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil},
{allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil},
{allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil},
{allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil},
{allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields},
}
for _, c := range contexts {
if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result {
t.Fatalf("expected result to be %t, got %t", c.result, res)
} else if err != c.err {
t.Fatalf("expected error to be %v, got %v", c.err, err)
}
}
}
func TestUpdateSettingsEnv(t *testing.T) {
contexts := []struct {
env []string
set settable
newEnv []string
}{
{[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}},
{[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}},
{[]string{"FOO=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}},
{[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}},
{[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}},
}
for _, c := range contexts {
updateSettingsEnv(&c.env, &c.set)
if !reflect.DeepEqual(c.env, c.newEnv) {
t.Fatalf("expected env to be %q, got %q", c.newEnv, c.env)
}
}
}