update vendor

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
This commit is contained in:
Jess Frazelle 2018-09-25 12:27:46 -04:00
parent 19a32db84d
commit 94d1cfbfbf
No known key found for this signature in database
GPG key ID: 18F3685C0022BFF3
10501 changed files with 2307943 additions and 29279 deletions

View file

@ -0,0 +1,348 @@
package containerizedengine
import (
"bytes"
"context"
"syscall"
"github.com/containerd/containerd"
containerdtypes "github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/oci"
prototypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
)
type (
// fakeContainerdClient stubs the subset of the containerd client API used
// by this package; each method delegates to the matching func field when
// set and falls back to an empty/zero result otherwise, so tests only need
// to stub what they exercise.
fakeContainerdClient struct {
containersFunc func(ctx context.Context, filters ...string) ([]containerd.Container, error)
newContainerFunc func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
pullFunc func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
getImageFunc func(ctx context.Context, ref string) (containerd.Image, error)
contentStoreFunc func() content.Store
containerServiceFunc func() containers.Store
}
// fakeContainer stubs containerd.Container the same way.
fakeContainer struct {
idFunc func() string
infoFunc func(context.Context) (containers.Container, error)
deleteFunc func(context.Context, ...containerd.DeleteOpts) error
newTaskFunc func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error)
specFunc func(context.Context) (*oci.Spec, error)
taskFunc func(context.Context, cio.Attach) (containerd.Task, error)
imageFunc func(context.Context) (containerd.Image, error)
labelsFunc func(context.Context) (map[string]string, error)
setLabelsFunc func(context.Context, map[string]string) (map[string]string, error)
extensionsFunc func(context.Context) (map[string]prototypes.Any, error)
updateFunc func(context.Context, ...containerd.UpdateContainerOpts) error
}
// fakeImage stubs containerd.Image.
fakeImage struct {
nameFunc func() string
targetFunc func() ocispec.Descriptor
unpackFunc func(context.Context, string) error
rootFSFunc func(ctx context.Context) ([]digest.Digest, error)
sizeFunc func(ctx context.Context) (int64, error)
configFunc func(ctx context.Context) (ocispec.Descriptor, error)
isUnpackedFunc func(context.Context, string) (bool, error)
contentStoreFunc func() content.Store
}
// fakeTask stubs containerd.Task.
fakeTask struct {
idFunc func() string
pidFunc func() uint32
startFunc func(context.Context) error
deleteFunc func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error)
killFunc func(context.Context, syscall.Signal, ...containerd.KillOpts) error
waitFunc func(context.Context) (<-chan containerd.ExitStatus, error)
closeIOFunc func(context.Context, ...containerd.IOCloserOpts) error
resizeFunc func(ctx context.Context, w, h uint32) error
ioFunc func() cio.IO
statusFunc func(context.Context) (containerd.Status, error)
pauseFunc func(context.Context) error
resumeFunc func(context.Context) error
execFunc func(context.Context, string, *specs.Process, cio.Creator) (containerd.Process, error)
pidsFunc func(context.Context) ([]containerd.ProcessInfo, error)
checkpointFunc func(context.Context, ...containerd.CheckpointTaskOpts) (containerd.Image, error)
updateFunc func(context.Context, ...containerd.UpdateTaskOpts) error
loadProcessFunc func(context.Context, string, cio.Attach) (containerd.Process, error)
metricsFunc func(context.Context) (*containerdtypes.Metric, error)
}
// testOutStream is an in-memory OutStream used to capture command output.
testOutStream struct {
bytes.Buffer
}
)
// fakeContainerdClient methods: each delegates to the corresponding stub
// func when it is set and otherwise returns an empty/zero result.
func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
if w.containersFunc != nil {
return w.containersFunc(ctx, filters...)
}
// Default is an empty (non-nil) list, i.e. "no containers found".
return []containerd.Container{}, nil
}
func (w *fakeContainerdClient) NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
if w.newContainerFunc != nil {
return w.newContainerFunc(ctx, id, opts...)
}
return nil, nil
}
func (w *fakeContainerdClient) Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
if w.pullFunc != nil {
return w.pullFunc(ctx, ref, opts...)
}
return nil, nil
}
func (w *fakeContainerdClient) GetImage(ctx context.Context, ref string) (containerd.Image, error) {
if w.getImageFunc != nil {
return w.getImageFunc(ctx, ref)
}
return nil, nil
}
func (w *fakeContainerdClient) ContentStore() content.Store {
if w.contentStoreFunc != nil {
return w.contentStoreFunc()
}
return nil
}
func (w *fakeContainerdClient) ContainerService() containers.Store {
if w.containerServiceFunc != nil {
return w.containerServiceFunc()
}
return nil
}
func (w *fakeContainerdClient) Close() error {
// Nothing to release in the fake.
return nil
}
// fakeContainer methods: each delegates to the matching stub func when set,
// otherwise returns the zero value for its result types.
func (c *fakeContainer) ID() string {
if c.idFunc != nil {
return c.idFunc()
}
return ""
}
func (c *fakeContainer) Info(ctx context.Context) (containers.Container, error) {
if c.infoFunc != nil {
return c.infoFunc(ctx)
}
return containers.Container{}, nil
}
func (c *fakeContainer) Delete(ctx context.Context, opts ...containerd.DeleteOpts) error {
if c.deleteFunc != nil {
return c.deleteFunc(ctx, opts...)
}
return nil
}
func (c *fakeContainer) NewTask(ctx context.Context, ioc cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
if c.newTaskFunc != nil {
return c.newTaskFunc(ctx, ioc, opts...)
}
return nil, nil
}
func (c *fakeContainer) Spec(ctx context.Context) (*oci.Spec, error) {
if c.specFunc != nil {
return c.specFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Task(ctx context.Context, attach cio.Attach) (containerd.Task, error) {
if c.taskFunc != nil {
return c.taskFunc(ctx, attach)
}
return nil, nil
}
func (c *fakeContainer) Image(ctx context.Context) (containerd.Image, error) {
if c.imageFunc != nil {
return c.imageFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Labels(ctx context.Context) (map[string]string, error) {
if c.labelsFunc != nil {
return c.labelsFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
if c.setLabelsFunc != nil {
return c.setLabelsFunc(ctx, labels)
}
return nil, nil
}
func (c *fakeContainer) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
if c.extensionsFunc != nil {
return c.extensionsFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Update(ctx context.Context, opts ...containerd.UpdateContainerOpts) error {
if c.updateFunc != nil {
return c.updateFunc(ctx, opts...)
}
return nil
}
// fakeImage methods: each delegates to the matching stub func when set,
// otherwise returns the zero value for its result types.
func (i *fakeImage) Name() string {
if i.nameFunc != nil {
return i.nameFunc()
}
return ""
}
func (i *fakeImage) Target() ocispec.Descriptor {
if i.targetFunc != nil {
return i.targetFunc()
}
return ocispec.Descriptor{}
}
func (i *fakeImage) Unpack(ctx context.Context, name string) error {
if i.unpackFunc != nil {
return i.unpackFunc(ctx, name)
}
return nil
}
func (i *fakeImage) RootFS(ctx context.Context) ([]digest.Digest, error) {
if i.rootFSFunc != nil {
return i.rootFSFunc(ctx)
}
return nil, nil
}
func (i *fakeImage) Size(ctx context.Context) (int64, error) {
if i.sizeFunc != nil {
return i.sizeFunc(ctx)
}
return 0, nil
}
func (i *fakeImage) Config(ctx context.Context) (ocispec.Descriptor, error) {
if i.configFunc != nil {
return i.configFunc(ctx)
}
return ocispec.Descriptor{}, nil
}
func (i *fakeImage) IsUnpacked(ctx context.Context, name string) (bool, error) {
if i.isUnpackedFunc != nil {
return i.isUnpackedFunc(ctx, name)
}
return false, nil
}
func (i *fakeImage) ContentStore() content.Store {
if i.contentStoreFunc != nil {
return i.contentStoreFunc()
}
return nil
}
// fakeTask methods: each delegates to the matching stub func when set,
// otherwise returns the zero value for its result types.
func (t *fakeTask) ID() string {
if t.idFunc != nil {
return t.idFunc()
}
return ""
}
func (t *fakeTask) Pid() uint32 {
if t.pidFunc != nil {
return t.pidFunc()
}
return 0
}
func (t *fakeTask) Start(ctx context.Context) error {
if t.startFunc != nil {
return t.startFunc(ctx)
}
return nil
}
func (t *fakeTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
if t.deleteFunc != nil {
return t.deleteFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Kill(ctx context.Context, signal syscall.Signal, opts ...containerd.KillOpts) error {
if t.killFunc != nil {
return t.killFunc(ctx, signal, opts...)
}
return nil
}
func (t *fakeTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, error) {
if t.waitFunc != nil {
return t.waitFunc(ctx)
}
// NOTE: default returns a nil channel; receiving from it blocks forever,
// so tests exercising Wait must set waitFunc.
return nil, nil
}
func (t *fakeTask) CloseIO(ctx context.Context, opts ...containerd.IOCloserOpts) error {
if t.closeIOFunc != nil {
return t.closeIOFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) Resize(ctx context.Context, w, h uint32) error {
if t.resizeFunc != nil {
return t.resizeFunc(ctx, w, h)
}
return nil
}
func (t *fakeTask) IO() cio.IO {
if t.ioFunc != nil {
return t.ioFunc()
}
return nil
}
func (t *fakeTask) Status(ctx context.Context) (containerd.Status, error) {
if t.statusFunc != nil {
return t.statusFunc(ctx)
}
return containerd.Status{}, nil
}
func (t *fakeTask) Pause(ctx context.Context) error {
if t.pauseFunc != nil {
return t.pauseFunc(ctx)
}
return nil
}
func (t *fakeTask) Resume(ctx context.Context) error {
if t.resumeFunc != nil {
return t.resumeFunc(ctx)
}
return nil
}
func (t *fakeTask) Exec(ctx context.Context, cmd string, proc *specs.Process, ioc cio.Creator) (containerd.Process, error) {
if t.execFunc != nil {
return t.execFunc(ctx, cmd, proc, ioc)
}
return nil, nil
}
func (t *fakeTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) {
if t.pidsFunc != nil {
return t.pidsFunc(ctx)
}
return nil, nil
}
func (t *fakeTask) Checkpoint(ctx context.Context, opts ...containerd.CheckpointTaskOpts) (containerd.Image, error) {
if t.checkpointFunc != nil {
return t.checkpointFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Update(ctx context.Context, opts ...containerd.UpdateTaskOpts) error {
if t.updateFunc != nil {
return t.updateFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) LoadProcess(ctx context.Context, name string, attach cio.Attach) (containerd.Process, error) {
if t.loadProcessFunc != nil {
return t.loadProcessFunc(ctx, name, attach)
}
return nil, nil
}
func (t *fakeTask) Metrics(ctx context.Context) (*containerdtypes.Metric, error) {
if t.metricsFunc != nil {
return t.metricsFunc(ctx)
}
return nil, nil
}
// FD reports a dummy file descriptor; the buffer has no real fd.
func (o *testOutStream) FD() uintptr {
return 0
}
// IsTerminal is always false: an in-memory buffer is never a TTY.
func (o *testOutStream) IsTerminal() bool {
return false
}

View file

@ -0,0 +1,77 @@
package containerizedengine
import (
"context"
"io"
"github.com/containerd/containerd"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (Client, error) {
	// Fall back to the default containerd socket when none was given.
	if sockPath == "" {
		sockPath = containerdSockPath
	}
	cc, err := containerd.New(sockPath)
	if err != nil {
		return nil, err
	}
	return baseClient{cclient: cc}, nil
}
// Close will close the underlying clients
func (c baseClient) Close() error {
// Only the containerd client needs closing today.
return c.cclient.Close()
}
// pullWithAuth pulls imageName from a registry using the given credentials,
// streaming human-readable progress to out while the pull runs.
func (c baseClient) pullWithAuth(ctx context.Context, imageName string, out OutStream,
authConfig *types.AuthConfig) (containerd.Image, error) {
// Static credential callback: same username/password for every host.
resolver := docker.NewResolver(docker.ResolverOptions{
Credentials: func(string) (string, string, error) {
return authConfig.Username, authConfig.Password, nil
},
})
ongoing := newJobs(imageName)
pctx, stopProgress := context.WithCancel(ctx)
progress := make(chan struct{})
bufin, bufout := io.Pipe()
// Producer: writes JSON progress messages for the content store into the
// pipe until pctx is cancelled.
go func() {
showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
}()
// Consumer: renders those messages onto out, then signals completion.
// NOTE(review): bufin/bufout are not closed here; presumably showProgress
// closes the write end on cancellation so the display goroutine can
// finish -- confirm, otherwise this goroutine leaks.
go func() {
jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
close(progress)
}()
// Track every descriptor except legacy schema-1 manifests for progress.
h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
ongoing.add(desc)
}
return nil, nil
})
image, err := c.cclient.Pull(ctx, imageName,
containerd.WithResolver(resolver),
containerd.WithImageHandler(h),
containerd.WithPullUnpack)
// Stop the progress writer before deciding the outcome.
stopProgress()
if err != nil {
// NOTE(review): returns without waiting on <-progress in the error
// path; the display goroutine may still be draining.
return nil, err
}
// Wait for the progress display to finish rendering.
<-progress
return image, nil
}

View file

@ -0,0 +1,43 @@
package containerizedengine
import (
"context"
"fmt"
"testing"
"github.com/containerd/containerd"
"github.com/docker/docker/api/types"
"gotest.tools/assert"
)
// TestPullWithAuthPullFail verifies that a pull error from containerd is
// surfaced to the caller of pullWithAuth.
func TestPullWithAuthPullFail(t *testing.T) {
	cdClient := &fakeContainerdClient{
		pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
			return nil, fmt.Errorf("pull failure")
		},
	}
	client := baseClient{cclient: cdClient}

	_, err := client.pullWithAuth(context.Background(), "testnamegoeshere", &testOutStream{}, &types.AuthConfig{})
	assert.ErrorContains(t, err, "pull failure")
}
// TestPullWithAuthPullPass verifies the happy path: a successful pull
// returns no error.
func TestPullWithAuthPullPass(t *testing.T) {
	cdClient := &fakeContainerdClient{
		pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
			return nil, nil
		},
	}
	client := baseClient{cclient: cdClient}

	_, err := client.pullWithAuth(context.Background(), "testnamegoeshere", &testOutStream{}, &types.AuthConfig{})
	assert.NilError(t, err)
}

View file

@ -0,0 +1,261 @@
package containerizedengine
import (
"context"
"fmt"
"io"
"strings"
"syscall"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/runtime/restart"
"github.com/docker/cli/internal/pkg/containerized"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
// InitEngine is the main entrypoint for `docker engine init`
// It verifies no engine container exists, pulls the engine image if needed,
// starts the engine under containerd, and waits until it responds to
// healthfn (or times out).
func (c baseClient) InitEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
// Verify engine isn't already running
_, err := c.GetEngine(ctx)
if err == nil {
return ErrEngineAlreadyPresent
} else if err != ErrEngineNotPresent {
// Any error other than the "not present" sentinel is a real failure.
return err
}
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
// Look for desired image
_, err = c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
// Image isn't local yet: pull it with the supplied credentials.
_, err = c.pullWithAuth(ctx, imageName, out, authConfig)
if err != nil {
return errors.Wrapf(err, "unable to pull image %s", imageName)
}
} else {
return errors.Wrapf(err, "unable to check for image %s", imageName)
}
}
// Spin up the engine
err = c.startEngineOnContainerd(ctx, imageName, opts.ConfigFile)
if err != nil {
return errors.Wrap(err, "failed to create docker daemon")
}
// Wait for the daemon to start, verify it's responsive
fmt.Fprintf(out, "Waiting for engine to start... ")
ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
defer cancel()
if err := c.waitForEngine(ctx, out, healthfn); err != nil {
// TODO once we have the logging strategy sorted out
// this should likely gather the last few lines of logs to report
// why the daemon failed to initialize
return errors.Wrap(err, "failed to start docker daemon")
}
fmt.Fprintf(out, "Success! The docker engine is now running.\n")
return nil
}
// GetEngine will return the containerd container running the engine (or error)
// It filters containerd's container list by the well-known engine container
// name and returns ErrEngineNotPresent when no match exists.
func (c baseClient) GetEngine(ctx context.Context) (containerd.Container, error) {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	// Named "engines" rather than "containers" to avoid shadowing the
	// imported containerd/containers package.
	engines, err := c.cclient.Containers(ctx, "id=="+engineContainerName)
	if err != nil {
		return nil, err
	}
	if len(engines) == 0 {
		return nil, ErrEngineNotPresent
	}
	return engines[0], nil
}
// getEngineImage will return the current image used by the engine
// A fresh background context is used since this is a quick metadata lookup.
func (c baseClient) getEngineImage(engine containerd.Container) (string, error) {
ctx := namespaces.WithNamespace(context.Background(), engineNamespace)
image, err := engine.Image(ctx)
if err != nil {
return "", err
}
return image.Name(), nil
}
// getEngineConfigFilePath will extract the config file location from the
// engine flags. Both "--config-file=/path" and "--config-file /path" forms
// are supported; if the flag repeats, the last occurrence wins.
func (c baseClient) getEngineConfigFilePath(ctx context.Context, engine containerd.Container) (string, error) {
	spec, err := engine.Spec(ctx)
	configFile := ""
	if err != nil {
		return configFile, err
	}
	for i := 0; i < len(spec.Process.Args); i++ {
		arg := spec.Process.Args[i]
		switch {
		case strings.HasPrefix(arg, "--config-file="):
			// Inline form: value follows the '='.
			configFile = strings.SplitN(arg, "=", 2)[1]
		case arg == "--config-file":
			// Split form: value is the next argument. A bare prefix match
			// would wrongly swallow unrelated flags like "--config-file-x",
			// so require an exact match here.
			if i+1 >= len(spec.Process.Args) {
				return configFile, ErrMalformedConfigFileParam
			}
			configFile = spec.Process.Args[i+1]
			i++ // skip the consumed value argument
		}
	}
	if configFile == "" {
		// TODO - any more diagnostics to offer?
		return configFile, ErrEngineConfigLookupFailure
	}
	return configFile, nil
}
var (
// engineWaitInterval is how often the engine is polled while waiting;
// engineWaitTimeout bounds the total wait. They are vars (not consts)
// so tests can shorten them.
engineWaitInterval = 500 * time.Millisecond
engineWaitTimeout = 60 * time.Second
)
// waitForEngine will wait for the engine to start
// It first waits for the engine container to appear, then polls healthfn
// until it succeeds or ctx expires.
func (c baseClient) waitForEngine(ctx context.Context, out io.Writer, healthfn func(context.Context) error) error {
	ticker := time.NewTicker(engineWaitInterval)
	defer ticker.Stop()
	defer func() {
		// Terminate the progress line regardless of outcome.
		fmt.Fprintf(out, "\n")
	}()
	err := c.waitForEngineContainer(ctx, ticker)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "waiting for engine to be responsive... ")
	for {
		select {
		case <-ticker.C:
			err = healthfn(ctx)
			if err == nil {
				fmt.Fprintf(out, "engine is online.")
				return nil
			}
		case <-ctx.Done():
			if err == nil {
				// The context expired before any health probe ran.
				// errors.Wrap(nil, ...) returns nil, which would falsely
				// signal success, so substitute the context error.
				err = ctx.Err()
			}
			return errors.Wrap(err, "timeout waiting for engine to be responsive")
		}
	}
}
// waitForEngineContainer polls (on ticker) until GetEngine reports a
// container, or ctx expires; on timeout it wraps the last lookup error.
func (c baseClient) waitForEngineContainer(ctx context.Context, ticker *time.Ticker) error {
	var ret error
	for {
		select {
		case <-ticker.C:
			engine, err := c.GetEngine(ctx)
			if engine != nil {
				return nil
			}
			ret = err
		case <-ctx.Done():
			if ret == nil {
				// ctx expired before the first poll; errors.Wrap(nil, ...)
				// returns nil, which would falsely report success, so use
				// the context error instead.
				ret = ctx.Err()
			}
			return errors.Wrap(ret, "timeout waiting for engine to be responsive")
		}
	}
}
// RemoveEngine gracefully unwinds the current engine: it disables the
// restart policy, kills and awaits any running task, deletes the task,
// and finally deletes the container with snapshot cleanup.
func (c baseClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)

	// Make sure the container isn't being restarted while we unwind it.
	// Best effort: teardown proceeds even if the label can't be set.
	stopLabel := map[string]string{
		restart.StatusLabel: string(containerd.Stopped),
	}
	engine.SetLabels(ctx, stopLabel) // nolint:errcheck

	// Wind down the existing engine
	task, err := engine.Task(ctx, nil)
	if err != nil {
		// "not found" simply means there's no task to stop.
		if !errdefs.IsNotFound(err) {
			return err
		}
	} else {
		status, err := task.Status(ctx)
		if err != nil {
			return err
		}
		if status.Status == containerd.Running {
			// It's running, so kill it
			if err := task.Kill(ctx, syscall.SIGTERM); err != nil {
				return errors.Wrap(err, "task kill error")
			}
			ch, err := task.Wait(ctx)
			if err != nil {
				return err
			}
			timeout := time.NewTimer(engineWaitTimeout)
			select {
			case <-timeout.C:
				// TODO - consider a force flag in the future to allow a more aggressive
				// kill of the engine via
				// task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll)
				return ErrEngineShutdownTimeout
			case <-ch:
				// Release the timer promptly instead of letting it fire later.
				timeout.Stop()
			}
		}
		if _, err := task.Delete(ctx); err != nil {
			return err
		}
	}

	// Treat "already gone" as success.
	deleteOpts := []containerd.DeleteOpts{containerd.WithSnapshotCleanup}
	err = engine.Delete(ctx, deleteOpts...)
	if err != nil && errdefs.IsNotFound(err) {
		return nil
	}
	return errors.Wrap(err, "failed to remove existing engine container")
}
// startEngineOnContainerd creates a new docker engine running on containerd
// The image must already be present locally and configFile must pass
// verifyDockerConfig.
func (c baseClient) startEngineOnContainerd(ctx context.Context, imageName, configFile string) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
image, err := c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("engine image missing: %s", imageName)
}
return errors.Wrap(err, "failed to check for engine image")
}
// Make sure we have a valid config file
err = c.verifyDockerConfig(configFile)
if err != nil {
return err
}
// NOTE(review): this appends to the package-level engineSpec, so calling
// this function more than once per process would accumulate duplicate
// --config-file flags -- confirm single-invocation assumption.
engineSpec.Process.Args = append(engineSpec.Process.Args,
"--config-file", configFile,
)
cOpts := []containerd.NewContainerOpts{
containerized.WithNewSnapshot(image),
restart.WithStatus(containerd.Running),
restart.WithLogPath("/var/log/engine.log"), // TODO - better!
genSpec(),
containerd.WithRuntime("io.containerd.runtime.process.v1", nil),
}
_, err = c.cclient.NewContainer(
ctx,
engineContainerName,
cOpts...,
)
if err != nil {
return errors.Wrap(err, "failed to create engine container")
}
return nil
}

View file

@ -0,0 +1,537 @@
package containerizedengine
import (
"context"
"fmt"
"syscall"
"testing"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
"github.com/docker/docker/api/types"
"github.com/opencontainers/runtime-spec/specs-go"
"gotest.tools/assert"
)
// healthfnHappy is a health probe that always reports success.
func healthfnHappy(ctx context.Context) error {
return nil
}
// healthfnError is a health probe that always fails with "ping failure".
func healthfnError(ctx context.Context) error {
return fmt.Errorf("ping failure")
}
// TestInitGetEngineFail: an engine container already exists, so InitEngine
// must return ErrEngineAlreadyPresent.
func TestInitGetEngineFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.Assert(t, err == ErrEngineAlreadyPresent)
}
// TestInitCheckImageFail: a non-NotFound error from GetImage is wrapped and
// surfaced as an image-lookup failure.
func TestInitCheckImageFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("something went wrong")
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to check for image")
assert.ErrorContains(t, err, "something went wrong")
}
// TestInitPullFail: image is missing locally and the pull fails, so the
// pull error is wrapped and returned.
func TestInitPullFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, fmt.Errorf("pull failure")
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to pull image")
assert.ErrorContains(t, err, "pull failure")
}
// TestInitStartFail: pull succeeds but starting the engine fails (the fake
// client returns a nil image), so InitEngine reports the daemon-create error.
func TestInitStartFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, nil
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "failed to create docker daemon")
}
// TestGetEngineFail: a containerd list error propagates out of GetEngine.
func TestGetEngineFail(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return nil, fmt.Errorf("container failure")
},
},
}
_, err := client.GetEngine(ctx)
assert.ErrorContains(t, err, "failure")
}
// TestGetEngineNotPresent: an empty container list maps to the
// ErrEngineNotPresent sentinel.
func TestGetEngineNotPresent(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
_, err := client.GetEngine(ctx)
assert.Assert(t, err == ErrEngineNotPresent)
}
// TestGetEngineFound: the first listed container is returned as the engine.
func TestGetEngineFound(t *testing.T) {
ctx := context.Background()
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
c, err := client.GetEngine(ctx)
assert.NilError(t, err)
assert.Equal(t, c, container)
}
// TestGetEngineImageFail: an Image() lookup error propagates out of
// getEngineImage.
func TestGetEngineImageFail(t *testing.T) {
client := baseClient{}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return nil, fmt.Errorf("failure")
},
}
_, err := client.getEngineImage(container)
assert.ErrorContains(t, err, "failure")
}
// TestGetEngineImagePass: getEngineImage returns the image's name.
func TestGetEngineImagePass(t *testing.T) {
client := baseClient{}
image := &fakeImage{
nameFunc: func() string {
return "imagenamehere"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
name, err := client.getEngineImage(container)
assert.NilError(t, err)
assert.Equal(t, name, "imagenamehere")
}
// These waitForEngine tests shorten the package-level engineWaitInterval,
// so they must not run in parallel with each other.
// TestWaitForEngineNeverShowsUp: no container ever appears, so the wait
// times out.
func TestWaitForEngineNeverShowsUp(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
assert.ErrorContains(t, err, "timeout waiting")
}
// TestWaitForEnginePingFail: the container appears but the health probe
// keeps failing, so the last probe error is reported.
func TestWaitForEnginePingFail(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
assert.ErrorContains(t, err, "ping fail")
}
// TestWaitForEngineHealthy: container appears and the probe succeeds.
func TestWaitForEngineHealthy(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnHappy)
assert.NilError(t, err)
}
// TestRemoveEngineBadTaskBadDelete: no task exists and the container delete
// fails, so the delete error is wrapped and returned.
func TestRemoveEngineBadTaskBadDelete(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
deleteFunc: func(context.Context, ...containerd.DeleteOpts) error {
return fmt.Errorf("delete failure")
},
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return nil, errdefs.ErrNotFound
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "failed to remove existing engine")
assert.ErrorContains(t, err, "delete failure")
}
// TestRemoveEngineTaskNoStatus: a task status error aborts the teardown.
func TestRemoveEngineTaskNoStatus(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{}, fmt.Errorf("task status failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task status failure")
}
// TestRemoveEngineTaskNotRunningDeleteFail: a non-running task is deleted
// directly; its delete error propagates.
func TestRemoveEngineTaskNotRunningDeleteFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Unknown}, nil
},
deleteFunc: func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
return nil, fmt.Errorf("task delete failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task delete failure")
}
// TestRemoveEngineTaskRunningKillFail: killing a running task fails; the
// error is wrapped as a task kill error.
func TestRemoveEngineTaskRunningKillFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
killFunc: func(context.Context, syscall.Signal, ...containerd.KillOpts) error {
return fmt.Errorf("task kill failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task kill failure")
}
// TestRemoveEngineTaskRunningWaitFail: kill succeeds but Wait fails; the
// wait error propagates.
func TestRemoveEngineTaskRunningWaitFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
return nil, fmt.Errorf("task wait failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task wait failure")
}
// TestRemoveEngineTaskRunningHappyPath: the task exits promptly (the exit
// status is pre-buffered on ch), so the full teardown succeeds.
func TestRemoveEngineTaskRunningHappyPath(t *testing.T) {
ctx := context.Background()
client := baseClient{}
ch := make(chan containerd.ExitStatus, 1)
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
ch <- containerd.ExitStatus{}
return ch, nil
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.NilError(t, err)
}
// TestRemoveEngineTaskKillTimeout: the task never exits (nothing sent on
// ch) and the shortened engineWaitTimeout fires, so the sentinel
// ErrEngineShutdownTimeout is returned. Mutates a package-level var; not
// parallel-safe.
func TestRemoveEngineTaskKillTimeout(t *testing.T) {
ctx := context.Background()
ch := make(chan containerd.ExitStatus, 1)
client := baseClient{}
engineWaitTimeout = 10 * time.Millisecond
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
//ch <- containerd.ExitStatus{} // let it timeout
return ch, nil
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.Assert(t, err == ErrEngineShutdownTimeout)
}
func TestStartEngineOnContainerdImageErr(t *testing.T) {
ctx := context.Background()
imageName := "testnamegoeshere"
configFile := "/tmp/configfilegoeshere"
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("some image lookup failure")
},
},
}
err := client.startEngineOnContainerd(ctx, imageName, configFile)
assert.ErrorContains(t, err, "some image lookup failure")
}
// TestStartEngineOnContainerdImageNotFound verifies that a not-found image
// lookup is surfaced as an "engine image missing" error.
func TestStartEngineOnContainerdImageNotFound(t *testing.T) {
	ctx := context.Background()
	c := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, errdefs.ErrNotFound
			},
		},
	}
	err := c.startEngineOnContainerd(ctx, "testnamegoeshere", "/tmp/configfilegoeshere")
	assert.ErrorContains(t, err, "engine image missing")
}
// TestStartEngineOnContainerdHappy verifies the happy path: the image is
// found, a container is created, and its task starts running.
func TestStartEngineOnContainerdHappy(t *testing.T) {
	ctx := context.Background()
	imageName := "testnamegoeshere"
	configFile := "/tmp/configfilegoeshere"
	ch := make(chan containerd.ExitStatus, 1)
	// NOTE(review): streams is never populated, so streams.Stdout is always
	// nil and the Write below never executes — confirm whether this guard
	// was meant to exercise output capture.
	streams := cio.Streams{}
	task := &fakeTask{
		statusFunc: func(context.Context) (containerd.Status, error) {
			return containerd.Status{Status: containerd.Running}, nil
		},
		waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
			ch <- containerd.ExitStatus{}
			return ch, nil
		},
	}
	container := &fakeContainer{
		newTaskFunc: func(ctx context.Context, creator cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
			if streams.Stdout != nil {
				streams.Stdout.Write([]byte("{}"))
			}
			return task, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, nil
			},
			newContainerFunc: func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
				return container, nil
			},
		},
	}
	err := client.startEngineOnContainerd(ctx, imageName, configFile)
	assert.NilError(t, err)
}
// TestGetEngineConfigFilePathBadSpec verifies that a failure to load the
// container's OCI spec is propagated to the caller.
func TestGetEngineConfigFilePathBadSpec(t *testing.T) {
	ctx := context.Background()
	engine := &fakeContainer{
		specFunc: func(context.Context) (*oci.Spec, error) {
			return nil, fmt.Errorf("spec error")
		},
	}
	c := baseClient{}
	_, err := c.getEngineConfigFilePath(ctx, engine)
	assert.ErrorContains(t, err, "spec error")
}
// TestGetEngineConfigFilePathDistinct verifies --config-file parsing when
// the flag and its value appear as two separate args.
func TestGetEngineConfigFilePathDistinct(t *testing.T) {
	ctx := context.Background()
	client := baseClient{}
	container := &fakeContainer{
		specFunc: func(context.Context) (*oci.Spec, error) {
			return &oci.Spec{
				Process: &specs.Process{
					Args: []string{
						"--another-flag",
						"foo",
						"--config-file",
						"configpath",
					},
				},
			}, nil
		},
	}
	configFile, err := client.getEngineConfigFilePath(ctx, container)
	assert.NilError(t, err)
	// Fixed: the original called assert.Assert(t, err, configFile == "configpath"),
	// passing the (nil) error as the comparison and the real check as a
	// message arg — nothing was actually asserted.
	assert.Equal(t, configFile, "configpath")
}
// TestGetEngineConfigFilePathEquals verifies --config-file parsing when the
// flag uses the "--flag=value" form.
func TestGetEngineConfigFilePathEquals(t *testing.T) {
	ctx := context.Background()
	client := baseClient{}
	container := &fakeContainer{
		specFunc: func(context.Context) (*oci.Spec, error) {
			return &oci.Spec{
				Process: &specs.Process{
					Args: []string{
						"--another-flag=foo",
						"--config-file=configpath",
					},
				},
			}, nil
		},
	}
	configFile, err := client.getEngineConfigFilePath(ctx, container)
	assert.NilError(t, err)
	// Fixed: the original passed the (nil) error as assert.Assert's
	// comparison and the real check as a message arg, asserting nothing.
	assert.Equal(t, configFile, "configpath")
}
// TestGetEngineConfigFilePathMalformed1 verifies that a trailing
// --config-file flag with no value yields ErrMalformedConfigFileParam.
func TestGetEngineConfigFilePathMalformed1(t *testing.T) {
	ctx := context.Background()
	client := baseClient{}
	container := &fakeContainer{
		specFunc: func(context.Context) (*oci.Spec, error) {
			return &oci.Spec{
				Process: &specs.Process{
					Args: []string{
						"--another-flag",
						"--config-file",
					},
				},
			}, nil
		},
	}
	_, err := client.getEngineConfigFilePath(ctx, container)
	assert.Assert(t, err == ErrMalformedConfigFileParam)
}

View file

@ -0,0 +1,16 @@
// +build !windows
package containerizedengine
import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/oci"
"github.com/docker/cli/internal/pkg/containerized"
)
// genSpec returns the containerd options used to create the engine
// container on unix: the shared engineSpec, all Linux capabilities, and
// access to the parent cgroup's devices.
func genSpec() containerd.NewContainerOpts {
	return containerd.WithSpec(&engineSpec,
		containerized.WithAllCapabilities,
		oci.WithParentCgroupDevices,
	)
}

View file

@ -0,0 +1,14 @@
// +build windows
package containerizedengine
import (
"github.com/containerd/containerd"
"github.com/docker/cli/internal/pkg/containerized"
)
// genSpec returns the containerd options used to create the engine
// container on windows: the shared engineSpec plus all capabilities
// (cgroup device access does not apply on this platform).
func genSpec() containerd.NewContainerOpts {
	return containerd.WithSpec(&engineSpec,
		containerized.WithAllCapabilities,
	)
}

View file

@ -0,0 +1,35 @@
package containerizedengine
import (
"os"
"path"
)
// verifyDockerConfig ensures the engine config file exists, creating its
// parent directory if needed and seeding the file with an empty JSON
// document ("{}") when it is absent or empty so dockerd can start with it.
//
// TODO - in the future consider leveraging containerd and a host runtime
// to create the file. For now, just create it locally since we have to be
// local to talk to containerd
func (c baseClient) verifyDockerConfig(configFile string) error {
	configDir := path.Dir(configFile)
	// 0755, not 0644: directories need the execute bit to be traversable —
	// the previous 0644 produced an unusable directory when one had to be
	// created.
	if err := os.MkdirAll(configDir, 0755); err != nil {
		return err
	}
	// The config file is plain JSON data, so 0644 (rw-r--r--) rather than
	// the previous 0755, which wrongly marked it executable.
	fd, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer fd.Close()
	info, err := fd.Stat()
	if err != nil {
		return err
	}
	if info.Size() == 0 {
		// Empty file: write a minimal valid JSON config.
		_, err := fd.Write([]byte("{}"))
		return err
	}
	return nil
}

View file

@ -0,0 +1,215 @@
package containerizedengine
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/docker/docker/pkg/jsonmessage"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// showProgress renders pull progress for the ongoing jobs as a stream of
// jsonmessage updates encoded onto out, polling the content store every
// 100ms. When ctx is cancelled it emits one final round of updates, closes
// out, and returns.
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		start    = time.Now()
		enc      = json.NewEncoder(out)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()
outer:
	for {
		select {
		case <-ticker.C:
			// First entry always reports whether the name itself resolved.
			resolved := "resolved"
			if !ongoing.isResolved() {
				resolved = "resolving"
			}
			statuses[ongoing.name] = statusInfo{
				Ref:    ongoing.name,
				Status: resolved,
			}
			keys := []string{ongoing.name}
			activeSeen := map[string]struct{}{}
			if !done {
				active, err := cs.ListStatuses(ctx, "")
				if err != nil {
					logrus.Debugf("active check failed: %s", err)
					continue
				}
				// update status of active entries!
				for _, active := range active {
					statuses[active.Ref] = statusInfo{
						Ref:       active.Ref,
						Status:    "downloading",
						Offset:    active.Offset,
						Total:     active.Total,
						StartedAt: active.StartedAt,
						UpdatedAt: active.UpdatedAt,
					}
					activeSeen[active.Ref] = struct{}{}
				}
			}
			// Fill in waiting/done/exists states for everything not active.
			err := updateNonActive(ctx, ongoing, cs, statuses, &keys, activeSeen, &done, start)
			if err != nil {
				continue outer
			}
			// Emit one message per tracked ref, in stable key order.
			var ordered []statusInfo
			for _, key := range keys {
				ordered = append(ordered, statuses[key])
			}
			for _, si := range ordered {
				jm := si.JSONMessage()
				err := enc.Encode(jm)
				if err != nil {
					logrus.Debugf("failed to encode progress message: %s", err)
				}
			}
			if done {
				out.Close()
				return
			}
		case <-ctx.Done():
			done = true // allow ui to update once more
		}
	}
}
// updateNonActive fills in statuses for jobs that are not currently
// downloading: "waiting" (content info not found yet), "done" (blob
// committed after start), or "exists" (blob predates this pull). When
// *done is set, remaining non-terminal entries are forced to "done".
// keys is extended with every job ref so the caller can render in a
// stable order. A non-NotFound Info error aborts and is returned.
func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys *[]string, activeSeen map[string]struct{}, done *bool, start time.Time) error {
	for _, j := range ongoing.jobs() {
		key := remotes.MakeRefKey(ctx, j)
		*keys = append(*keys, key)
		// Skip anything the caller already marked as actively downloading.
		if _, ok := activeSeen[key]; ok {
			continue
		}
		status, ok := statuses[key]
		if !*done && (!ok || status.Status == "downloading") {
			info, err := cs.Info(ctx, j.Digest)
			if err != nil {
				if !errdefs.IsNotFound(err) {
					logrus.Debugf("failed to get content info: %s", err)
					return err
				}
				// Not in the store yet: still waiting to start.
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "waiting",
				}
			} else if info.CreatedAt.After(start) {
				// Committed during this pull: complete.
				statuses[key] = statusInfo{
					Ref:       key,
					Status:    "done",
					Offset:    info.Size,
					Total:     info.Size,
					UpdatedAt: info.CreatedAt,
				}
			} else {
				// Present before this pull began.
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "exists",
				}
			}
		} else if *done {
			// Final pass: everything settles into a terminal state.
			if ok {
				if status.Status != "done" && status.Status != "exists" {
					status.Status = "done"
					statuses[key] = status
				}
			} else {
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "done",
				}
			}
		}
	}
	return nil
}
// jobs tracks the set of content descriptors being pulled for one image so
// progress can be reported per descriptor, plus whether the image name has
// been resolved. All fields are guarded by mu.
type jobs struct {
	name     string
	added    map[digest.Digest]struct{}
	descs    []ocispec.Descriptor
	mu       sync.Mutex
	resolved bool
}

// newJobs returns an empty job tracker for the given image name.
func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: map[digest.Digest]struct{}{},
	}
}
// add records a descriptor for progress tracking, ignoring duplicates by
// digest. Any call (even a duplicate) marks the name as resolved.
func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()
	j.resolved = true
	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.descs = append(j.descs, desc)
	j.added[desc.Digest] = struct{}{}
}
// jobs returns a snapshot copy of the tracked descriptors so callers can
// iterate without holding the lock.
func (j *jobs) jobs() []ocispec.Descriptor {
	j.mu.Lock()
	defer j.mu.Unlock()
	return append([]ocispec.Descriptor(nil), j.descs...)
}

// isResolved reports whether the image name has been resolved (i.e. add
// has been called at least once).
func (j *jobs) isResolved() bool {
	j.mu.Lock()
	resolved := j.resolved
	j.mu.Unlock()
	return resolved
}
// statusInfo holds the status info for an upload or download
type statusInfo struct {
	Ref       string    // content ref key this status describes
	Status    string    // "resolving", "waiting", "downloading", "done", "exists", ...
	Offset    int64     // bytes transferred so far
	Total     int64     // total bytes expected
	StartedAt time.Time
	UpdatedAt time.Time
}
// JSONMessage converts the status into a jsonmessage.JSONMessage for
// progress display, shortening the ref to a 12-character ID.
func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
	// Shorten the ID to use up less width on the display: keep only what
	// follows the first ":" (if any), then cap at 12 characters.
	shortID := s.Ref
	if idx := strings.Index(shortID, ":"); idx >= 0 {
		shortID = shortID[idx+1:]
	}
	shortID = fmt.Sprintf("%.12s", shortID)
	return jsonmessage.JSONMessage{
		ID:     shortID,
		Status: s.Status,
		Progress: &jsonmessage.JSONProgress{
			Current: s.Offset,
			Total:   s.Total,
		},
	}
}

View file

@ -0,0 +1,12 @@
// +build !windows
package containerizedengine
import (
"golang.org/x/sys/unix"
)
var (
	// SIGKILL maps to unix.SIGKILL — the signal sent to force the engine
	// task to exit when it does not stop within the wait timeout.
	SIGKILL = unix.SIGKILL
)

View file

@ -0,0 +1,12 @@
// +build windows
package containerizedengine
import (
"syscall"
)
var (
	// SIGKILL all signals are ignored by containerd kill windows, so a
	// zero-value signal is used as a placeholder on this platform.
	SIGKILL = syscall.Signal(0)
)

View file

@ -0,0 +1,159 @@
package containerizedengine
import (
"context"
"errors"
"io"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/docker/api/types"
ver "github.com/hashicorp/go-version"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
	// CommunityEngineImage is the repo name for the community engine
	CommunityEngineImage = "engine-community"

	// EnterpriseEngineImage is the repo name for the enterprise engine
	EnterpriseEngineImage = "engine-enterprise"

	// containerdSockPath is the containerd socket both this client and the
	// managed dockerd connect to.
	containerdSockPath = "/run/containerd/containerd.sock"
	// engineContainerName is the fixed containerd container name for dockerd.
	engineContainerName = "dockerd"
	// engineNamespace is the containerd namespace holding the engine container.
	engineNamespace = "docker"

	// Used to signal the containerd-proxy if it should manage
	proxyLabel = "com.docker/containerd-proxy.scope"
)

var (
	// ErrEngineAlreadyPresent returned when engine already present and should not be
	ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")

	// ErrEngineNotPresent returned when the engine is not present and should be
	ErrEngineNotPresent = errors.New("engine not present")

	// ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
	ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")

	// ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
	ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")

	// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
	ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")

	// ErrEngineImageMissingTag returned if the engine image is missing the version tag
	ErrEngineImageMissingTag = errors.New("malformed engine image missing tag")

	// engineSpec is the base OCI spec used when creating the dockerd
	// container; platform-specific options are layered on by genSpec.
	engineSpec = specs.Spec{
		Root: &specs.Root{
			Path: "rootfs",
		},
		Process: &specs.Process{
			Cwd: "/",
			Args: []string{
				// In general, configuration should be driven by the config file, not these flags
				// TODO - consider moving more of these to the config file, and make sure the defaults are set if not present.
				"/sbin/dockerd",
				"-s",
				"overlay2",
				"--containerd",
				"/run/containerd/containerd.sock",
				"--default-runtime",
				"containerd",
				"--add-runtime",
				"containerd=runc",
			},
			User: specs.User{
				UID: 0,
				GID: 0,
			},
			Env: []string{
				"PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
			},
			NoNewPrivileges: false,
		},
	}
)
// Client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type Client interface {
	// Close releases the client's underlying resources.
	Close() error

	// ActivateEngine switches the engine to the enterprise image, pulling
	// it with authConfig if needed, and reports progress on out.
	ActivateEngine(ctx context.Context,
		opts EngineInitOptions,
		out OutStream,
		authConfig *types.AuthConfig,
		healthfn func(context.Context) error) error

	// InitEngine sets up a new containerized engine per opts.
	InitEngine(ctx context.Context,
		opts EngineInitOptions,
		out OutStream,
		authConfig *types.AuthConfig,
		healthfn func(context.Context) error) error

	// DoUpdate updates the existing engine to the version given in opts,
	// waiting for healthfn to pass before committing.
	DoUpdate(ctx context.Context,
		opts EngineInitOptions,
		out OutStream,
		authConfig *types.AuthConfig,
		healthfn func(context.Context) error) error

	// GetEngineVersions lists tags available for imageName, classified
	// relative to currentVersion.
	GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error)

	// GetEngine returns the existing engine container, or an error
	// (ErrEngineNotPresent when absent).
	GetEngine(ctx context.Context) (containerd.Container, error)

	// RemoveEngine stops and removes the given engine container.
	RemoveEngine(ctx context.Context, engine containerd.Container) error

	// GetCurrentEngineVersion inspects the running engine's image to
	// report its registry prefix, flavor and version.
	GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error)
}
// baseClient is the default Client implementation, backed by a (mockable)
// containerd client.
type baseClient struct {
	cclient containerdClient
}

// EngineInitOptions contains the configuration settings
// use during initialization of a containerized docker engine
type EngineInitOptions struct {
	RegistryPrefix string
	EngineImage    string
	EngineVersion  string
	ConfigFile     string
	// scope is the containerd-proxy scope label value (e.g. "ee"); it is
	// set internally and not exposed to callers.
	scope string
}

// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
	Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
	NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
	Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
	GetImage(ctx context.Context, ref string) (containerd.Image, error)
	Close() error
	ContentStore() content.Store
	ContainerService() containers.Store
}
// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
	Downgrades []DockerVersion // versions older than the current one
	Patches    []DockerVersion // same major.minor, equal or newer
	Upgrades   []DockerVersion // newer major.minor
}

// DockerVersion wraps a semantic version to retain the original tag
// since the docker date based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
	ver.Version
	Tag string
}

// Update stores available updates for rendering in a table
type Update struct {
	Type    string
	Version string
	Notes   string
}

// OutStream is an output stream used to write normal program output.
type OutStream interface {
	io.Writer
	FD() uintptr
	IsTerminal() bool
}

View file

@ -0,0 +1,130 @@
package containerizedengine
import (
"context"
"fmt"
"path"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
"github.com/docker/cli/internal/pkg/containerized"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
// GetCurrentEngineVersion determines the current type of engine (image) and version
// by inspecting the running engine container's image reference. The image
// must carry a tag, otherwise ErrEngineImageMissingTag is returned.
func (c baseClient) GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error) {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	ret := EngineInitOptions{}
	currentEngine := CommunityEngineImage
	engine, err := c.GetEngine(ctx)
	if err != nil {
		if err == ErrEngineNotPresent {
			return ret, errors.Wrap(err, "failed to find existing engine")
		}
		return ret, err
	}
	imageName, err := c.getEngineImage(engine)
	if err != nil {
		return ret, err
	}
	distributionRef, err := reference.ParseNormalizedNamed(imageName)
	if err != nil {
		return ret, errors.Wrapf(err, "failed to parse image name: %s", imageName)
	}

	// Enterprise is detected by the repo name embedded in the reference;
	// anything else is treated as the community flavor.
	if strings.Contains(distributionRef.Name(), EnterpriseEngineImage) {
		currentEngine = EnterpriseEngineImage
	}

	taggedRef, ok := distributionRef.(reference.NamedTagged)
	if !ok {
		return ret, ErrEngineImageMissingTag
	}

	ret.EngineImage = currentEngine
	ret.EngineVersion = taggedRef.Tag()
	// Registry prefix is everything before the final path component, e.g.
	// "docker.io/docker" for "docker.io/docker/engine-community:tag".
	ret.RegistryPrefix = reference.Domain(taggedRef) + "/" + path.Dir(reference.Path(taggedRef))
	return ret, nil
}
// ActivateEngine will switch the image from the CE to EE image
// and then delegate to DoUpdate. If no version was given, the running
// engine's version is reused; if the engine is already enterprise the
// call is a no-op (only licensing would change).
func (c baseClient) ActivateEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
	authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

	// set the proxy scope to "ee" for activate flows
	opts.scope = "ee"

	ctx = namespaces.WithNamespace(ctx, engineNamespace)

	// If version is unspecified, use the existing engine version
	if opts.EngineVersion == "" {
		currentOpts, err := c.GetCurrentEngineVersion(ctx)
		if err != nil {
			return err
		}
		opts.EngineVersion = currentOpts.EngineVersion
		if currentOpts.EngineImage == EnterpriseEngineImage {
			// This is a "no-op" activation so the only change would be the license - don't update the engine itself
			return nil
		}
	}
	return c.DoUpdate(ctx, opts, out, authConfig, healthfn)
}
// DoUpdate performs the underlying engine update: it resolves (pulling if
// necessary) the requested image, atomically swaps the existing engine
// container onto it gated by healthfn, and finally applies the proxy
// scope label when one was requested.
func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out OutStream,
	authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	if opts.EngineVersion == "" {
		// TODO - Future enhancement: This could be improved to be
		// smart about figuring out the latest patch rev for the
		// current engine version and automatically apply it so users
		// could stay in sync by simply having a scheduled
		// `docker engine update`
		return fmt.Errorf("please pick the version you want to update to")
	}

	imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)

	// Look for desired image
	image, err := c.cclient.GetImage(ctx, imageName)
	if err != nil {
		if errdefs.IsNotFound(err) {
			image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
			if err != nil {
				return errors.Wrapf(err, "unable to pull image %s", imageName)
			}
		} else {
			return errors.Wrapf(err, "unable to check for image %s", imageName)
		}
	}

	// Gather information about the existing engine so we can recreate it
	engine, err := c.GetEngine(ctx)
	if err != nil {
		if err == ErrEngineNotPresent {
			return errors.Wrap(err, "unable to find existing engine - please use init")
		}
		return err
	}

	// TODO verify the image has changed and don't update if nothing has changed
	err = containerized.AtomicImageUpdate(ctx, engine, image, func() error {
		ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
		defer cancel()
		return c.waitForEngine(ctx, out, healthfn)
	})
	if err == nil && opts.scope != "" {
		// Tag the engine with the proxy scope so containerd-proxy manages it.
		var labels map[string]string
		labels, err = engine.Labels(ctx)
		if err != nil {
			return err
		}
		labels[proxyLabel] = opts.scope
		_, err = engine.SetLabels(ctx, labels)
	}
	return err
}

View file

@ -0,0 +1,318 @@
package containerizedengine
import (
"context"
"fmt"
"testing"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs"
"github.com/docker/docker/api/types"
"gotest.tools/assert"
)
// TestGetCurrentEngineVersionHappy verifies registry prefix, image flavor
// and tag are extracted from a mirrored community-engine image name.
func TestGetCurrentEngineVersionHappy(t *testing.T) {
	ctx := context.Background()
	image := &fakeImage{
		nameFunc: func() string {
			return "acme.com/dockermirror/" + CommunityEngineImage + ":engineversion"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
		},
	}
	opts, err := client.GetCurrentEngineVersion(ctx)
	assert.NilError(t, err)
	assert.Equal(t, opts.EngineImage, CommunityEngineImage)
	assert.Equal(t, opts.RegistryPrefix, "acme.com/dockermirror")
	assert.Equal(t, opts.EngineVersion, "engineversion")
}

// TestGetCurrentEngineVersionEnterpriseHappy is the enterprise-image
// variant of the happy-path parsing test.
func TestGetCurrentEngineVersionEnterpriseHappy(t *testing.T) {
	ctx := context.Background()
	image := &fakeImage{
		nameFunc: func() string {
			return "docker.io/docker/" + EnterpriseEngineImage + ":engineversion"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
		},
	}
	opts, err := client.GetCurrentEngineVersion(ctx)
	assert.NilError(t, err)
	assert.Equal(t, opts.EngineImage, EnterpriseEngineImage)
	assert.Equal(t, opts.EngineVersion, "engineversion")
	assert.Equal(t, opts.RegistryPrefix, "docker.io/docker")
}

// TestGetCurrentEngineVersionNoEngine verifies a wrapped error when no
// engine container exists.
func TestGetCurrentEngineVersionNoEngine(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{}, nil
			},
		},
	}
	_, err := client.GetCurrentEngineVersion(ctx)
	assert.ErrorContains(t, err, "failed to find existing engine")
}

// TestGetCurrentEngineVersionMiscEngineError verifies that lookup errors
// other than "not present" are passed through unchanged.
func TestGetCurrentEngineVersionMiscEngineError(t *testing.T) {
	ctx := context.Background()
	expectedError := fmt.Errorf("some container lookup error")
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return nil, expectedError
			},
		},
	}
	_, err := client.GetCurrentEngineVersion(ctx)
	assert.Assert(t, err == expectedError)
}

// TestGetCurrentEngineVersionImageFailure verifies an image lookup failure
// on the engine container is propagated.
func TestGetCurrentEngineVersionImageFailure(t *testing.T) {
	ctx := context.Background()
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return nil, fmt.Errorf("container image failure")
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
		},
	}
	_, err := client.GetCurrentEngineVersion(ctx)
	assert.ErrorContains(t, err, "container image failure")
}

// TestGetCurrentEngineVersionMalformed verifies an untagged image name
// yields ErrEngineImageMissingTag.
func TestGetCurrentEngineVersionMalformed(t *testing.T) {
	ctx := context.Background()
	image := &fakeImage{
		nameFunc: func() string {
			return "imagename"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
		},
	}
	_, err := client.GetCurrentEngineVersion(ctx)
	assert.Assert(t, err == ErrEngineImageMissingTag)
}
// TestActivateNoEngine verifies activation fails with a helpful error when
// no engine container exists.
func TestActivateNoEngine(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{}, nil
			},
		},
	}
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    EnterpriseEngineImage,
	}
	err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "unable to find")
}

// TestActivateNoChange verifies activating an engine that is already the
// enterprise image (with no explicit version change path triggering an
// update) succeeds without error.
func TestActivateNoChange(t *testing.T) {
	ctx := context.Background()
	registryPrefix := "registryprefixgoeshere"
	image := &fakeImage{
		nameFunc: func() string {
			return registryPrefix + "/" + EnterpriseEngineImage + ":engineversion"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
		taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
			return nil, errdefs.ErrNotFound
		},
		labelsFunc: func(context.Context) (map[string]string, error) {
			return map[string]string{}, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
		},
	}
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    EnterpriseEngineImage,
	}
	err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.NilError(t, err)
}

// TestActivateDoUpdateFail verifies an image lookup failure during the
// delegated update is wrapped and surfaced.
func TestActivateDoUpdateFail(t *testing.T) {
	ctx := context.Background()
	registryPrefix := "registryprefixgoeshere"
	image := &fakeImage{
		nameFunc: func() string {
			return registryPrefix + "/ce-engine:engineversion"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, fmt.Errorf("something went wrong")
			},
		},
	}
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    EnterpriseEngineImage,
	}
	err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "check for image")
	assert.ErrorContains(t, err, "something went wrong")
}

// TestDoUpdateNoVersion verifies DoUpdate refuses to run without an
// explicit target version.
func TestDoUpdateNoVersion(t *testing.T) {
	ctx := context.Background()
	opts := EngineInitOptions{
		EngineVersion:  "",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    EnterpriseEngineImage,
	}
	client := baseClient{}
	err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "please pick the version you")
}

// TestDoUpdateImageMiscError verifies non-NotFound image lookup errors are
// wrapped with the "check for image" context.
func TestDoUpdateImageMiscError(t *testing.T) {
	ctx := context.Background()
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    "testnamegoeshere",
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, fmt.Errorf("something went wrong")
			},
		},
	}
	err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "check for image")
	assert.ErrorContains(t, err, "something went wrong")
}

// TestDoUpdatePullFail verifies a failed pull after a NotFound lookup is
// wrapped with the "unable to pull" context.
func TestDoUpdatePullFail(t *testing.T) {
	ctx := context.Background()
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    "testnamegoeshere",
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, errdefs.ErrNotFound
			},
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
				return nil, fmt.Errorf("pull failure")
			},
		},
	}
	err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "unable to pull")
	assert.ErrorContains(t, err, "pull failure")
}

// TestDoUpdateEngineMissing verifies the update aborts when the image is
// available but no engine container exists to update.
func TestDoUpdateEngineMissing(t *testing.T) {
	ctx := context.Background()
	opts := EngineInitOptions{
		EngineVersion:  "engineversiongoeshere",
		RegistryPrefix: "registryprefixgoeshere",
		ConfigFile:     "/tmp/configfilegoeshere",
		EngineImage:    "testnamegoeshere",
	}
	image := &fakeImage{
		nameFunc: func() string {
			return "imagenamehere"
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return image, nil
			},
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{}, nil
			},
		},
	}
	err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
	assert.ErrorContains(t, err, "unable to find existing engine")
}

View file

@ -0,0 +1,72 @@
package containerizedengine
import (
"context"
"sort"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/distribution/reference"
ver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GetEngineVersions reports the versions of the engine that are available
// for imageName, by listing the registry's tags and classifying each
// relative to currentVersion (see parseTags).
func (c baseClient) GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error) {
	imageRef, err := reference.ParseNormalizedNamed(imageName)
	if err != nil {
		return AvailableVersions{}, err
	}

	tags, err := registryClient.GetTags(ctx, imageRef)
	if err != nil {
		return AvailableVersions{}, err
	}

	return parseTags(tags, currentVersion)
}
// parseTags classifies each tag relative to currentVersion: anything older
// is a downgrade; same major.minor is a patch; otherwise an upgrade. Tags
// that do not parse as semantic versions are logged and skipped. Each
// result list is sorted ascending. Returns an error only when
// currentVersion itself cannot be parsed.
func parseTags(tags []string, currentVersion string) (AvailableVersions, error) {
	var ret AvailableVersions
	currentVer, err := ver.NewVersion(currentVersion)
	if err != nil {
		return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
	}
	downgrades := []DockerVersion{}
	patches := []DockerVersion{}
	upgrades := []DockerVersion{}
	currentSegments := currentVer.Segments()
	for _, tag := range tags {
		tmp, err := ver.NewVersion(tag)
		if err != nil {
			logrus.Debugf("Unable to parse %s: %s", tag, err)
			continue
		}
		testVersion := DockerVersion{Version: *tmp, Tag: tag}
		if testVersion.LessThan(currentVer) {
			downgrades = append(downgrades, testVersion)
			continue
		}
		testSegments := testVersion.Segments()
		// lib always provides min 3 segments
		if testSegments[0] == currentSegments[0] &&
			testSegments[1] == currentSegments[1] {
			patches = append(patches, testVersion)
		} else {
			upgrades = append(upgrades, testVersion)
		}
	}
	sort.Slice(downgrades, func(i, j int) bool {
		return downgrades[i].Version.LessThan(&downgrades[j].Version)
	})
	sort.Slice(patches, func(i, j int) bool {
		return patches[i].Version.LessThan(&patches[j].Version)
	})
	sort.Slice(upgrades, func(i, j int) bool {
		return upgrades[i].Version.LessThan(&upgrades[j].Version)
	})
	ret.Downgrades = downgrades
	ret.Patches = patches
	ret.Upgrades = upgrades
	return ret, nil
}

View file

@ -0,0 +1,80 @@
package containerizedengine
import (
"context"
"testing"
"gotest.tools/assert"
)
// TestGetEngineVersionsBadImage verifies an unparseable image name is
// rejected before any registry call is made (registryClient is nil here).
func TestGetEngineVersionsBadImage(t *testing.T) {
	ctx := context.Background()
	client := baseClient{}

	currentVersion := "currentversiongoeshere"
	imageName := "this is an illegal image $%^&"
	_, err := client.GetEngineVersions(ctx, nil, currentVersion, imageName)
	assert.ErrorContains(t, err, "invalid reference format")
}
// TestParseTagsSimple verifies basic downgrade/patch/upgrade bucketing and
// ascending sort order.
func TestParseTagsSimple(t *testing.T) {
	tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
	currentVersion := "1.1.0"
	res, err := parseTags(tags, currentVersion)
	assert.NilError(t, err)
	// Fixed: removed the leftover `assert.Assert(t, err, "already present")`
	// lines in these tests — they passed the (nil) error as the comparison
	// argument and so asserted nothing.
	assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
	assert.Assert(t, len(res.Patches) == 2 && res.Patches[0].Tag == "1.1.1" && res.Patches[1].Tag == "1.1.2")
	assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}

// TestParseConfirmMinSegments verifies short tags like "1" and "2" are
// still classified (the version lib pads to three segments).
func TestParseConfirmMinSegments(t *testing.T) {
	tags := []string{"1", "1.1.1", "2"}
	currentVersion := "1.1"
	res, err := parseTags(tags, currentVersion)
	assert.NilError(t, err)
	assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1")
	assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
	assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "2")
}

// TestParseTagsFilterPrerelease verifies a prerelease of the current
// version sorts below it and lands in Downgrades.
func TestParseTagsFilterPrerelease(t *testing.T) {
	tags := []string{"1.0.0", "1.1.1", "1.2.2", "1.1.0-beta1"}
	currentVersion := "1.1.0"
	res, err := parseTags(tags, currentVersion)
	assert.NilError(t, err)
	assert.Assert(t, len(res.Downgrades) == 2 && res.Downgrades[0].Tag == "1.0.0")
	assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
	assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}

// TestParseTagsBadTag verifies non-semver tags are silently skipped.
func TestParseTagsBadTag(t *testing.T) {
	tags := []string{"1.0.0", "1.1.1", "1.2.2", "notasemanticversion"}
	currentVersion := "1.1.0"
	res, err := parseTags(tags, currentVersion)
	assert.NilError(t, err)
	assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
	assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
	assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}
// TestParseBadCurrent verifies a non-semver current version is rejected.
func TestParseBadCurrent(t *testing.T) {
	tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
	currentVersion := "notasemanticversion"
	_, err := parseTags(tags, currentVersion)
	assert.ErrorContains(t, err, "failed to parse existing")
}

// TestParseBadCurrent2 verifies an empty current version is rejected.
func TestParseBadCurrent2(t *testing.T) {
	tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
	currentVersion := ""
	_, err := parseTags(tags, currentVersion)
	assert.ErrorContains(t, err, "failed to parse existing")
}