commit cdd93563f5 (parent 3fc6abf56b)

add better generate

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>

5655 changed files with 1187011 additions and 392 deletions
267
vendor/github.com/docker/docker-ce/components/engine/distribution/config.go
generated
vendored
Normal file
@@ -0,0 +1,267 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"encoding/json"
	"fmt"
	"io"
	"runtime"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/libtrust"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"golang.org/x/net/context"
)

// Config stores configuration for communicating
// with a registry.
type Config struct {
	// MetaHeaders stores HTTP headers with metadata about the image
	MetaHeaders map[string][]string
	// AuthConfig holds authentication credentials for authenticating with
	// the registry.
	AuthConfig *types.AuthConfig
	// ProgressOutput is the interface for showing the status of the pull
	// operation.
	ProgressOutput progress.Output
	// RegistryService is the registry service to use for TLS configuration
	// and endpoint lookup.
	RegistryService registry.Service
	// ImageEventLogger notifies events for a given image
	ImageEventLogger func(id, name, action string)
	// MetadataStore is the storage backend for distribution-specific
	// metadata.
	MetadataStore metadata.Store
	// ImageStore manages images.
	ImageStore ImageConfigStore
	// ReferenceStore manages tags. This value is optional; when excluded,
	// content will not be tagged.
	ReferenceStore refstore.Store
	// RequireSchema2 ensures that only schema2 manifests are used.
	RequireSchema2 bool
}

// ImagePullConfig stores pull configuration.
type ImagePullConfig struct {
	Config

	// DownloadManager manages concurrent pulls.
	DownloadManager RootFSDownloadManager
	// Schema2Types is the valid schema2 configuration types allowed
	// by the pull operation.
	Schema2Types []string
	// OS is the requested operating system of the image being pulled to ensure it can be validated
	// when the host OS supports multiple image operating systems.
	OS string
}

// ImagePushConfig stores push configuration.
type ImagePushConfig struct {
	Config

	// ConfigMediaType is the configuration media type for
	// schema2 manifests.
	ConfigMediaType string
	// LayerStores (indexed by operating system) manages layers.
	LayerStores map[string]PushLayerProvider
	// TrustKey is the private key for legacy signatures. This is typically
	// an ephemeral key, since these signatures are no longer verified.
	TrustKey libtrust.PrivateKey
	// UploadManager dispatches uploads.
	UploadManager *xfer.LayerUploadManager
}

// ImageConfigStore handles storing and getting image configurations
// by digest. Allows getting an image configuration's rootfs from the
// configuration.
type ImageConfigStore interface {
	Put([]byte) (digest.Digest, error)
	Get(digest.Digest) ([]byte, error)
	RootFSFromConfig([]byte) (*image.RootFS, error)
	PlatformFromConfig([]byte) (*specs.Platform, error)
}

// PushLayerProvider provides layers to be pushed by ChainID.
type PushLayerProvider interface {
	Get(layer.ChainID) (PushLayer, error)
}

// PushLayer is a pushable layer with metadata about the layer
// and access to the content of the layer.
type PushLayer interface {
	ChainID() layer.ChainID
	DiffID() layer.DiffID
	Parent() PushLayer
	Open() (io.ReadCloser, error)
	Size() (int64, error)
	MediaType() string
	Release()
}

// RootFSDownloadManager handles downloading of the rootfs
type RootFSDownloadManager interface {
	// Download downloads the layers into the given initial rootfs and
	// returns the final rootfs. Progress output is given to track the
	// download, and a function is returned to release download resources.
	Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
}

type imageConfigStore struct {
	image.Store
}

// NewImageConfigStoreFromStore returns an ImageConfigStore backed
// by an image.Store for container images.
func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore {
	return &imageConfigStore{
		Store: is,
	}
}

func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) {
	id, err := s.Store.Create(c)
	return digest.Digest(id), err
}

func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
	img, err := s.Store.Get(image.IDFromDigest(d))
	if err != nil {
		return nil, err
	}
	return img.RawJSON(), nil
}

func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	var unmarshalledConfig image.Image
	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
		return nil, err
	}
	return unmarshalledConfig.RootFS, nil
}

func (s *imageConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) {
	var unmarshalledConfig image.Image
	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
		return nil, err
	}

	// fail immediately on Windows when downloading a non-Windows image
	// and vice versa. Exception on Windows if Linux Containers are enabled.
	if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() {
		return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
	} else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" {
		return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
	}

	os := unmarshalledConfig.OS
	if os == "" {
		os = runtime.GOOS
	}
	if !system.IsOSSupported(os) {
		return nil, system.ErrNotSupportedOperatingSystem
	}
	return &specs.Platform{OS: os, OSVersion: unmarshalledConfig.OSVersion}, nil
}

type storeLayerProvider struct {
	ls layer.Store
}

// NewLayerProvidersFromStores returns layer providers backed by
// an instance of LayerStore. Only getting layers as gzipped
// tars is supported.
func NewLayerProvidersFromStores(lss map[string]layer.Store) map[string]PushLayerProvider {
	plps := make(map[string]PushLayerProvider)
	for os, ls := range lss {
		plps[os] = &storeLayerProvider{ls: ls}
	}
	return plps
}

func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) {
	if lid == "" {
		return &storeLayer{
			Layer: layer.EmptyLayer,
		}, nil
	}
	l, err := p.ls.Get(lid)
	if err != nil {
		return nil, err
	}

	sl := storeLayer{
		Layer: l,
		ls:    p.ls,
	}
	if d, ok := l.(distribution.Describable); ok {
		return &describableStoreLayer{
			storeLayer:  sl,
			describable: d,
		}, nil
	}

	return &sl, nil
}

type storeLayer struct {
	layer.Layer
	ls layer.Store
}

func (l *storeLayer) Parent() PushLayer {
	p := l.Layer.Parent()
	if p == nil {
		return nil
	}
	sl := storeLayer{
		Layer: p,
		ls:    l.ls,
	}
	if d, ok := p.(distribution.Describable); ok {
		return &describableStoreLayer{
			storeLayer:  sl,
			describable: d,
		}
	}

	return &sl
}

func (l *storeLayer) Open() (io.ReadCloser, error) {
	return l.Layer.TarStream()
}

func (l *storeLayer) Size() (int64, error) {
	return l.Layer.DiffSize()
}

func (l *storeLayer) MediaType() string {
	// layer store always returns uncompressed tars
	return schema2.MediaTypeUncompressedLayer
}

func (l *storeLayer) Release() {
	if l.ls != nil {
		layer.ReleaseAndLog(l.ls, l.Layer)
	}
}

type describableStoreLayer struct {
	storeLayer
	describable distribution.Describable
}

func (l *describableStoreLayer) Descriptor() distribution.Descriptor {
	return l.describable.Descriptor()
}
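A minimal usage sketch for the config-decoding path above (not part of the commit): this mirrors what RootFSFromConfig and PlatformFromConfig do with a raw config blob, assuming the vendored image package resolves under its canonical import path. The config JSON here is a made-up, truncated example.

package main

import (
	"encoding/json"
	"fmt"
	"runtime"

	"github.com/docker/docker/image"
)

func main() {
	// rawConfig is a hypothetical, truncated image configuration.
	rawConfig := []byte(`{"os":"linux","architecture":"amd64","rootfs":{"type":"layers"}}`)
	var cfg image.Image
	if err := json.Unmarshal(rawConfig, &cfg); err != nil {
		panic(err)
	}
	os := cfg.OS
	if os == "" {
		os = runtime.GOOS // PlatformFromConfig applies the same default
	}
	fmt.Println("image OS:", os, "rootfs type:", cfg.RootFS.Type)
}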
206
vendor/github.com/docker/docker-ce/components/engine/distribution/errors.go
generated
vendored
Normal file
@@ -0,0 +1,206 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"fmt"
	"net/url"
	"strings"
	"syscall"

	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/errdefs"
	"github.com/sirupsen/logrus"
)

// ErrNoSupport is an error type used for errors indicating that an operation
// is not supported. It encapsulates a more specific error.
type ErrNoSupport struct{ Err error }

func (e ErrNoSupport) Error() string {
	if e.Err == nil {
		return "not supported"
	}
	return e.Err.Error()
}

// fallbackError wraps an error that can possibly allow fallback to a different
// endpoint.
type fallbackError struct {
	// err is the error being wrapped.
	err error
	// confirmedV2 is set to true if it was confirmed that the registry
	// supports the v2 protocol. This is used to limit fallbacks to the v1
	// protocol.
	confirmedV2 bool
	// transportOK is set to true if we managed to speak HTTP with the
	// registry. This confirms that we're using appropriate TLS settings
	// (or lack of TLS).
	transportOK bool
}

// Error renders the fallbackError as a string.
func (f fallbackError) Error() string {
	return f.Cause().Error()
}

func (f fallbackError) Cause() error {
	return f.err
}

// shouldV2Fallback returns true if this error is a reason to fall back to v1.
func shouldV2Fallback(err errcode.Error) bool {
	switch err.Code {
	case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
		return true
	}
	return false
}

type notFoundError struct {
	cause errcode.Error
	ref   reference.Named
}

func (e notFoundError) Error() string {
	switch e.cause.Code {
	case errcode.ErrorCodeDenied:
		// ErrorCodeDenied is used when access to the repository was denied
		return fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(e.ref))
	case v2.ErrorCodeManifestUnknown:
		return fmt.Sprintf("manifest for %s not found", reference.FamiliarString(e.ref))
	case v2.ErrorCodeNameUnknown:
		return fmt.Sprintf("repository %s not found", reference.FamiliarName(e.ref))
	}
	// Shouldn't get here, but this is better than returning an empty string
	return e.cause.Message
}

func (e notFoundError) NotFound() {}

func (e notFoundError) Cause() error {
	return e.cause
}

// TranslatePullError is used to convert an error from a registry pull
// operation to an error representing the entire pull operation. Any error
// information which is not used by the returned error gets output to
// log at info level.
func TranslatePullError(err error, ref reference.Named) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			for _, extra := range v[1:] {
				logrus.Infof("Ignoring extra error returned from registry: %v", extra)
			}
			return TranslatePullError(v[0], ref)
		}
	case errcode.Error:
		switch v.Code {
		case errcode.ErrorCodeDenied, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
			return notFoundError{v, ref}
		}
	case xfer.DoNotRetry:
		return TranslatePullError(v.Err, ref)
	}

	return errdefs.Unknown(err)
}

// continueOnError returns true if we should fall back to the next endpoint
// as a result of this error.
func continueOnError(err error, mirrorEndpoint bool) bool {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) == 0 {
			return true
		}
		return continueOnError(v[0], mirrorEndpoint)
	case ErrNoSupport:
		return continueOnError(v.Err, mirrorEndpoint)
	case errcode.Error:
		return mirrorEndpoint || shouldV2Fallback(v)
	case *client.UnexpectedHTTPResponseError:
		return true
	case ImageConfigPullError:
		// ImageConfigPullError only happens with v2 images, v1 fallback is
		// unnecessary.
		// Failures from a mirror endpoint should result in fallback to the
		// canonical repo.
		return mirrorEndpoint
	case error:
		return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error()))
	}
	// let's be nice and fall back if the error is a completely
	// unexpected one.
	// If new errors have to be handled in some way, please
	// add them to the switch above.
	return true
}

// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the
// operation after this error.
func retryOnError(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			return retryOnError(v[0])
		}
	case errcode.Error:
		switch v.Code {
		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown:
			return xfer.DoNotRetry{Err: err}
		}
	case *url.Error:
		switch v.Err {
		case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken:
			return xfer.DoNotRetry{Err: v.Err}
		}
		return retryOnError(v.Err)
	case *client.UnexpectedHTTPResponseError:
		return xfer.DoNotRetry{Err: err}
	case error:
		if err == distribution.ErrBlobUnknown {
			return xfer.DoNotRetry{Err: err}
		}
		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
			return xfer.DoNotRetry{Err: err}
		}
	}
	// let's be nice and allow a retry if the error is a completely
	// unexpected one.
	// If new errors have to be handled in some way, please
	// add them to the switch above.
	return err
}

type invalidManifestClassError struct {
	mediaType string
	class     string
}

func (e invalidManifestClassError) Error() string {
	return fmt.Sprintf("Encountered remote %q(%s) when fetching", e.mediaType, e.class)
}

func (e invalidManifestClassError) InvalidParameter() {}

type invalidManifestFormatError struct{}

func (invalidManifestFormatError) Error() string {
	return "unsupported manifest format"
}

func (invalidManifestFormatError) InvalidParameter() {}

type reservedNameError string

func (e reservedNameError) Error() string {
	return "'" + string(e) + "' is a reserved name"
}

func (e reservedNameError) Forbidden() {}
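A hedged sketch (not part of the commit) of how a caller might surface a registry error through the exported TranslatePullError above; import paths assume the vendored layout, and the error value mirrors the fixtures in the test file that follows.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	dist "github.com/docker/docker/distribution"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("library/hello-world")
	if err != nil {
		panic(err)
	}
	// A manifest-unknown response becomes a notFoundError, whose message
	// names the familiar reference instead of the raw registry code.
	regErr := errcode.Error{Code: v2.ErrorCodeManifestUnknown}
	fmt.Println(dist.TranslatePullError(regErr, ref))
}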
85
vendor/github.com/docker/docker-ce/components/engine/distribution/errors_test.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"errors"
	"strings"
	"syscall"
	"testing"

	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client"
)

var alwaysContinue = []error{
	&client.UnexpectedHTTPResponseError{},

	// Some errcode.Errors that don't disprove the existence of a V1 image
	errcode.Error{Code: errcode.ErrorCodeUnauthorized},
	errcode.Error{Code: v2.ErrorCodeManifestUnknown},
	errcode.Error{Code: v2.ErrorCodeNameUnknown},

	errors.New("some totally unexpected error"),
}

var continueFromMirrorEndpoint = []error{
	ImageConfigPullError{},

	// Some other errcode.Error that doesn't indicate we should search for a V1 image.
	errcode.Error{Code: errcode.ErrorCodeTooManyRequests},
}

var neverContinue = []error{
	errors.New(strings.ToLower(syscall.ESRCH.Error())), // No such process
}

func TestContinueOnError_NonMirrorEndpoint(t *testing.T) {
	for _, err := range alwaysContinue {
		if !continueOnError(err, false) {
			t.Errorf("Should continue from non-mirror endpoint: %T: '%s'", err, err.Error())
		}
	}

	for _, err := range continueFromMirrorEndpoint {
		if continueOnError(err, false) {
			t.Errorf("Should only continue from mirror endpoint: %T: '%s'", err, err.Error())
		}
	}
}

func TestContinueOnError_MirrorEndpoint(t *testing.T) {
	errs := []error{}
	errs = append(errs, alwaysContinue...)
	errs = append(errs, continueFromMirrorEndpoint...)
	for _, err := range errs {
		if !continueOnError(err, true) {
			t.Errorf("Should continue from mirror endpoint: %T: '%s'", err, err.Error())
		}
	}
}

func TestContinueOnError_NeverContinue(t *testing.T) {
	for _, isMirrorEndpoint := range []bool{true, false} {
		for _, err := range neverContinue {
			if continueOnError(err, isMirrorEndpoint) {
				t.Errorf("Should never continue: %T: '%s'", err, err.Error())
			}
		}
	}
}

func TestContinueOnError_UnnestsErrors(t *testing.T) {
	// ContinueOnError should evaluate nested errcode.Errors.

	// Assumes that v2.ErrorCodeNameUnknown is a continuable error code.
	err := errcode.Errors{errcode.Error{Code: v2.ErrorCodeNameUnknown}}
	if !continueOnError(err, false) {
		t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
	}

	// Assumes that errcode.ErrorCodeTooManyRequests is not a V1-fallback indication
	err = errcode.Errors{errcode.Error{Code: errcode.ErrorCodeTooManyRequests}}
	if continueOnError(err, false) {
		t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
	}
}
38
vendor/github.com/docker/docker-ce/components/engine/distribution/fixtures/validate_manifest/bad_manifest
generated
vendored
Normal file
@@ -0,0 +1,38 @@
{
   "schemaVersion": 2,
   "name": "library/hello-world",
   "tag": "latest",
   "architecture": "amd64",
   "fsLayers": [
      {
         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
      },
      {
         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
      }
   ],
   "history": [
      {
         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
      },
      {
         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
      }
   ],
   "signatures": [
      {
         "header": {
            "jwk": {
               "crv": "P-256",
               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
               "kty": "EC",
               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
            },
            "alg": "ES256"
         },
         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
      }
   ]
}
46
vendor/github.com/docker/docker-ce/components/engine/distribution/fixtures/validate_manifest/extra_data_manifest
generated
vendored
Normal file
@@ -0,0 +1,46 @@
{
   "schemaVersion": 1,
   "name": "library/hello-world",
   "tag": "latest",
   "architecture": "amd64",
   "fsLayers": [
      {
         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
      },
      {
         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
      }
   ],
   "history": [
      {
         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
      },
      {
         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
      }
   ],
   "fsLayers": [
      {
         "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
      },
      {
         "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
      }
   ],
   "signatures": [
      {
         "header": {
            "jwk": {
               "crv": "P-256",
               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
               "kty": "EC",
               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
            },
            "alg": "ES256"
         },
         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
      }
   ]
}
38
vendor/github.com/docker/docker-ce/components/engine/distribution/fixtures/validate_manifest/good_manifest
generated
vendored
Normal file
@@ -0,0 +1,38 @@
{
   "schemaVersion": 1,
   "name": "library/hello-world",
   "tag": "latest",
   "architecture": "amd64",
   "fsLayers": [
      {
         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
      },
      {
         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
      }
   ],
   "history": [
      {
         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
      },
      {
         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
      }
   ],
   "signatures": [
      {
         "header": {
            "jwk": {
               "crv": "P-256",
               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
               "kty": "EC",
               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
            },
            "alg": "ES256"
         },
         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
      }
   ]
}
75
vendor/github.com/docker/docker-ce/components/engine/distribution/metadata/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
package metadata // import "github.com/docker/docker/distribution/metadata"

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/docker/docker/pkg/ioutils"
)

// Store implements a K/V store for mapping distribution-related IDs
// to on-disk layer IDs and image IDs. The namespace identifies the type of
// mapping (e.g. "v1ids" or "artifacts"). Store implementations must be
// goroutine-safe.
type Store interface {
	// Get retrieves data by namespace and key.
	Get(namespace string, key string) ([]byte, error)
	// Set writes data indexed by namespace and key.
	Set(namespace, key string, value []byte) error
	// Delete removes data indexed by namespace and key.
	Delete(namespace, key string) error
}

// FSMetadataStore uses the filesystem to associate metadata with layer and
// image IDs.
type FSMetadataStore struct {
	sync.RWMutex
	basePath string
}

// NewFSMetadataStore creates a new filesystem-based metadata store.
func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
	if err := os.MkdirAll(basePath, 0700); err != nil {
		return nil, err
	}
	return &FSMetadataStore{
		basePath: basePath,
	}, nil
}

func (store *FSMetadataStore) path(namespace, key string) string {
	return filepath.Join(store.basePath, namespace, key)
}

// Get retrieves data by namespace and key. The data is read from a file named
// after the key, stored in the namespace's directory.
func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) {
	store.RLock()
	defer store.RUnlock()

	return ioutil.ReadFile(store.path(namespace, key))
}

// Set writes data indexed by namespace and key. The data is written to a file
// named after the key, stored in the namespace's directory.
func (store *FSMetadataStore) Set(namespace, key string, value []byte) error {
	store.Lock()
	defer store.Unlock()

	path := store.path(namespace, key)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(path, value, 0644)
}

// Delete removes data indexed by namespace and key. The data file named after
// the key, stored in the namespace's directory, is deleted.
func (store *FSMetadataStore) Delete(namespace, key string) error {
	store.Lock()
	defer store.Unlock()

	path := store.path(namespace, key)
	return os.Remove(path)
}
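A minimal usage sketch for FSMetadataStore above (not part of the commit): a Get/Set/Delete round trip. The base path, namespace, and key are arbitrary example values; the import path assumes the vendored layout.

package main

import (
	"fmt"

	"github.com/docker/docker/distribution/metadata"
)

func main() {
	store, err := metadata.NewFSMetadataStore("/tmp/dist-metadata-example")
	if err != nil {
		panic(err)
	}
	// Each entry lands at <basePath>/<namespace>/<key> on disk.
	if err := store.Set("artifacts", "example-key", []byte("example-value")); err != nil {
		panic(err)
	}
	value, err := store.Get("artifacts", "example-key")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", value) // example-value
	if err := store.Delete("artifacts", "example-key"); err != nil {
		panic(err)
	}
}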
51
vendor/github.com/docker/docker-ce/components/engine/distribution/metadata/v1_id_service.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
package metadata // import "github.com/docker/docker/distribution/metadata"

import (
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/pkg/errors"
)

// V1IDService maps v1 IDs to layers on disk.
type V1IDService struct {
	store Store
}

// NewV1IDService creates a new V1 ID mapping service.
func NewV1IDService(store Store) *V1IDService {
	return &V1IDService{
		store: store,
	}
}

// namespace returns the namespace used by this service.
func (idserv *V1IDService) namespace() string {
	return "v1id"
}

// Get finds a layer by its V1 ID.
func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
	if idserv.store == nil {
		return "", errors.New("no v1IDService storage")
	}
	if err := v1.ValidateID(v1ID); err != nil {
		return layer.DiffID(""), err
	}

	idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID)
	if err != nil {
		return layer.DiffID(""), err
	}
	return layer.DiffID(idBytes), nil
}

// Set associates an image with a V1 ID.
func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error {
	if idserv.store == nil {
		return nil
	}
	if err := v1.ValidateID(v1ID); err != nil {
		return err
	}
	return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id))
}
88
vendor/github.com/docker/docker-ce/components/engine/distribution/metadata/v1_id_service_test.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
package metadata // import "github.com/docker/docker/distribution/metadata"

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/docker/layer"
	"github.com/gotestyourself/gotestyourself/assert"
)

func TestV1IDService(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "v1-id-service-test")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	metadataStore, err := NewFSMetadataStore(tmpDir)
	if err != nil {
		t.Fatalf("could not create metadata store: %v", err)
	}
	v1IDService := NewV1IDService(metadataStore)

	ns := v1IDService.namespace()

	assert.Equal(t, "v1id", ns)

	testVectors := []struct {
		registry string
		v1ID     string
		layerID  layer.DiffID
	}{
		{
			registry: "registry1",
			v1ID:     "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937",
			layerID:  layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
		},
		{
			registry: "registry2",
			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
			layerID:  layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
		},
		{
			registry: "registry1",
			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
			layerID:  layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
		},
	}

	// Set some associations
	for _, vec := range testVectors {
		err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID)
		if err != nil {
			t.Fatalf("error calling Set: %v", err)
		}
	}

	// Check the correct values are read back
	for _, vec := range testVectors {
		layerID, err := v1IDService.Get(vec.v1ID, vec.registry)
		if err != nil {
			t.Fatalf("error calling Get: %v", err)
		}
		if layerID != vec.layerID {
			t.Fatal("Get returned incorrect layer ID")
		}
	}

	// Test Get on a nonexistent entry
	_, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1")
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Overwrite one of the entries and read it back
	err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID)
	if err != nil {
		t.Fatalf("error calling Set: %v", err)
	}
	layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry)
	if err != nil {
		t.Fatalf("error calling Get: %v", err)
	}
	if layerID != testVectors[1].layerID {
		t.Fatal("Get returned incorrect layer ID")
	}
}
241
vendor/github.com/docker/docker-ce/components/engine/distribution/metadata/v2_metadata_service.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
package metadata // import "github.com/docker/docker/distribution/metadata"

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/layer"
	"github.com/opencontainers/go-digest"
)

// V2MetadataService maps layer IDs to a set of known metadata for
// the layer.
type V2MetadataService interface {
	GetMetadata(diffID layer.DiffID) ([]V2Metadata, error)
	GetDiffID(dgst digest.Digest) (layer.DiffID, error)
	Add(diffID layer.DiffID, metadata V2Metadata) error
	TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error
	Remove(metadata V2Metadata) error
}

// v2MetadataService implements V2MetadataService
type v2MetadataService struct {
	store Store
}

var _ V2MetadataService = &v2MetadataService{}

// V2Metadata contains the digest and source repository information for a layer.
type V2Metadata struct {
	Digest           digest.Digest
	SourceRepository string
	// HMAC hashes the above attributes with the most recent authconfig digest used as a key
	// in order to determine matching metadata entries accompanied by the same credentials
	// without actually exposing them.
	HMAC string
}

// CheckV2MetadataHMAC returns true if the given "meta" is tagged with an HMAC hashed by the given "key".
func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool {
	if len(meta.HMAC) == 0 || len(key) == 0 {
		return len(meta.HMAC) == 0 && len(key) == 0
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(meta.Digest))
	mac.Write([]byte(meta.SourceRepository))
	expectedMac := mac.Sum(nil)

	storedMac, err := hex.DecodeString(meta.HMAC)
	if err != nil {
		return false
	}

	return hmac.Equal(storedMac, expectedMac)
}

// ComputeV2MetadataHMAC returns an HMAC for the given "meta", hashed by the given key.
func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string {
	if len(key) == 0 || meta == nil {
		return ""
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(meta.Digest))
	mac.Write([]byte(meta.SourceRepository))
	return hex.EncodeToString(mac.Sum(nil))
}

// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata
// entries.
func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) {
	if authConfig == nil {
		return nil, nil
	}
	key := authConfigKeyInput{
		Username:      authConfig.Username,
		Password:      authConfig.Password,
		Auth:          authConfig.Auth,
		IdentityToken: authConfig.IdentityToken,
		RegistryToken: authConfig.RegistryToken,
	}
	buf, err := json.Marshal(&key)
	if err != nil {
		return nil, err
	}
	return []byte(digest.FromBytes(buf)), nil
}

// authConfigKeyInput is a reduced AuthConfig structure holding just the credential data eligible for
// hmac key creation.
type authConfigKeyInput struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	IdentityToken string `json:"identitytoken,omitempty"`
	RegistryToken string `json:"registrytoken,omitempty"`
}

// maxMetadata is the number of metadata entries to keep per layer DiffID.
const maxMetadata = 50

// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
func NewV2MetadataService(store Store) V2MetadataService {
	return &v2MetadataService{
		store: store,
	}
}

func (serv *v2MetadataService) diffIDNamespace() string {
	return "v2metadata-by-diffid"
}

func (serv *v2MetadataService) digestNamespace() string {
	return "diffid-by-digest"
}

func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
}

func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
	return string(dgst.Algorithm()) + "/" + dgst.Hex()
}

// GetMetadata finds the metadata associated with a layer DiffID.
func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
	if serv.store == nil {
		return nil, errors.New("no metadata storage")
	}
	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
	if err != nil {
		return nil, err
	}

	var metadata []V2Metadata
	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
		return nil, err
	}

	return metadata, nil
}

// GetDiffID finds a layer DiffID from a digest.
func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
	if serv.store == nil {
		return layer.DiffID(""), errors.New("no metadata storage")
	}
	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
	if err != nil {
		return layer.DiffID(""), err
	}

	return layer.DiffID(diffIDBytes), nil
}

// Add associates metadata with a layer DiffID. If too many metadata entries are
// present, the oldest one is dropped.
func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
	if serv.store == nil {
		// Support a service which has no backend storage, in this case
		// an add becomes a no-op.
		// TODO: implement in memory storage
		return nil
	}
	oldMetadata, err := serv.GetMetadata(diffID)
	if err != nil {
		oldMetadata = nil
	}
	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)

	// Copy all other metadata to new slice
	for _, oldMeta := range oldMetadata {
		if oldMeta != metadata {
			newMetadata = append(newMetadata, oldMeta)
		}
	}

	newMetadata = append(newMetadata, metadata)

	if len(newMetadata) > maxMetadata {
		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
	}

	jsonBytes, err := json.Marshal(newMetadata)
	if err != nil {
		return err
	}

	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
	if err != nil {
		return err
	}

	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
}

// TagAndAdd amends the given "meta" with an HMAC hashed by the given "hmacKey" and associates it with a layer
// DiffID. If too many metadata entries are present, the oldest one is dropped.
func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error {
	meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta)
	return serv.Add(diffID, meta)
}

// Remove disassociates a metadata entry from a layer DiffID.
func (serv *v2MetadataService) Remove(metadata V2Metadata) error {
	if serv.store == nil {
		// Support a service which has no backend storage, in this case
		// a remove becomes a no-op.
		// TODO: implement in memory storage
		return nil
	}
	diffID, err := serv.GetDiffID(metadata.Digest)
	if err != nil {
		return err
	}
	oldMetadata, err := serv.GetMetadata(diffID)
	if err != nil {
		oldMetadata = nil
	}
	newMetadata := make([]V2Metadata, 0, len(oldMetadata))

	// Copy all other metadata to new slice
	for _, oldMeta := range oldMetadata {
		if oldMeta != metadata {
			newMetadata = append(newMetadata, oldMeta)
		}
	}

	if len(newMetadata) == 0 {
		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
	}

	jsonBytes, err := json.Marshal(newMetadata)
	if err != nil {
		return err
	}

	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
}
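A hedged sketch (not part of the commit) of tagging and verifying a V2Metadata entry with the HMAC helpers above — the part of this service the test file below does not exercise. The credentials, digest, and repository here are made-up example values; import paths assume the vendored layout.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
)

func main() {
	// Derive an HMAC key from (hypothetical) registry credentials.
	key, err := metadata.ComputeV2MetadataHMACKey(&types.AuthConfig{
		Username: "example-user",
		Password: "example-secret",
	})
	if err != nil {
		panic(err)
	}
	meta := metadata.V2Metadata{
		Digest:           "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
		SourceRepository: "docker.io/library/hello-world",
	}
	// Tag the entry; TagAndAdd does the same before persisting.
	meta.HMAC = metadata.ComputeV2MetadataHMAC(key, &meta)
	fmt.Println(metadata.CheckV2MetadataHMAC(&meta, key))             // true
	fmt.Println(metadata.CheckV2MetadataHMAC(&meta, []byte("other"))) // false
}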
115
vendor/github.com/docker/docker-ce/components/engine/distribution/metadata/v2_metadata_service_test.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
package metadata // import "github.com/docker/docker/distribution/metadata"

import (
	"encoding/hex"
	"io/ioutil"
	"math/rand"
	"os"
	"reflect"
	"testing"

	"github.com/docker/docker/layer"
	"github.com/opencontainers/go-digest"
)

func TestV2MetadataService(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	metadataStore, err := NewFSMetadataStore(tmpDir)
	if err != nil {
		t.Fatalf("could not create metadata store: %v", err)
	}
	V2MetadataService := NewV2MetadataService(metadataStore)

	tooManyBlobSums := make([]V2Metadata, 100)
	for i := range tooManyBlobSums {
		randDigest := randomDigest()
		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
	}

	testVectors := []struct {
		diffID   layer.DiffID
		metadata []V2Metadata
	}{
		{
			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
			metadata: []V2Metadata{
				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
			},
		},
		{
			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
			metadata: []V2Metadata{
				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
			},
		},
		{
			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
			metadata: tooManyBlobSums,
		},
	}

	// Set some associations
	for _, vec := range testVectors {
		for _, blobsum := range vec.metadata {
			err := V2MetadataService.Add(vec.diffID, blobsum)
			if err != nil {
				t.Fatalf("error calling Set: %v", err)
			}
		}
	}

	// Check the correct values are read back
	for _, vec := range testVectors {
		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
		if err != nil {
			t.Fatalf("error calling Get: %v", err)
		}
		expectedMetadataEntries := len(vec.metadata)
		if expectedMetadataEntries > 50 {
			expectedMetadataEntries = 50
		}
		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
			t.Fatal("Get returned incorrect layer ID")
		}
	}

	// Test GetMetadata on a nonexistent entry
	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Test GetDiffID on a nonexistent entry
	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Overwrite one of the entries and read it back
	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
	if err != nil {
		t.Fatalf("error calling Add: %v", err)
	}
	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
	if err != nil {
		t.Fatalf("error calling GetDiffID: %v", err)
	}
	if diffID != testVectors[1].diffID {
		t.Fatal("GetDiffID returned incorrect diffID")
	}
}

func randomDigest() digest.Digest {
	b := [32]byte{}
	for i := 0; i < len(b); i++ {
		b[i] = byte(rand.Intn(256))
	}
	d := hex.EncodeToString(b[:])
	return digest.Digest("sha256:" + d)
}
206
vendor/github.com/docker/docker-ce/components/engine/distribution/pull.go
generated
vendored
Normal file
@@ -0,0 +1,206 @@
package distribution // import "github.com/docker/docker/distribution"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/distribution/metadata"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
refstore "github.com/docker/docker/reference"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Puller is an interface that abstracts pulling for different API versions.
|
||||
type Puller interface {
|
||||
// Pull tries to pull the image referenced by `tag`
|
||||
// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint.
|
||||
//
|
||||
Pull(ctx context.Context, ref reference.Named, os string) error
|
||||
}
|
||||
|
||||
// newPuller returns a Puller interface that will pull from either a v1 or v2
|
||||
// registry. The endpoint argument contains a Version field that determines
|
||||
// whether a v1 or v2 puller will be created. The other parameters are passed
|
||||
// through to the underlying puller implementation for use during the actual
|
||||
// pull operation.
|
||||
func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) {
|
||||
switch endpoint.Version {
|
||||
case registry.APIVersion2:
|
||||
return &v2Puller{
|
||||
V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
|
||||
endpoint: endpoint,
|
||||
config: imagePullConfig,
|
||||
repoInfo: repoInfo,
|
||||
}, nil
|
||||
case registry.APIVersion1:
|
||||
return &v1Puller{
|
||||
v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
|
||||
endpoint: endpoint,
|
||||
config: imagePullConfig,
|
||||
repoInfo: repoInfo,
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
|
||||
}
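
// newPuller is only called from Pull below: every endpoint returned by
// LookupPullEndpoints carries an API version, so a daemon can mix v1 and v2
// endpoints in a single fallback chain and this constructor picks the
// matching implementation for each attempt.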

// Pull initiates a pull operation. ref is the image reference to pull: it
// may include a tag or digest, or be name-only, in which case every tag in
// the repository is pulled.
func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error {
	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
	if err != nil {
		return err
	}

	// makes sure name is not `scratch`
	if err := ValidateRepoName(repoInfo.Name); err != nil {
		return err
	}

	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
	if err != nil {
		return err
	}

	var (
		lastErr error

		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport.
		// By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr.
		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
		// any subsequent ErrNoSupport errors in lastErr.
		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
		// errors are the ones from the v2 endpoints, not v1.
		discardNoSupportErrors bool

		// confirmedV2 is set to true if a pull attempt managed to
		// confirm that it was talking to a v2 registry. This will
		// prevent fallback to the v1 protocol.
		confirmedV2 bool

		// confirmedTLSRegistries is a map indicating which registries
		// are known to be using TLS. There should never be a plaintext
		// retry for any of these.
		confirmedTLSRegistries = make(map[string]struct{})
	)
	for _, endpoint := range endpoints {
		if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
			continue
		}

		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}

		if endpoint.URL.Scheme != "https" {
			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
				logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
				continue
			}
		}

		logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version)

		puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
		if err != nil {
			lastErr = err
			continue
		}

		// Make sure we default the OS if it hasn't been supplied
		if imagePullConfig.OS == "" {
			imagePullConfig.OS = runtime.GOOS
		}

		if err := puller.Pull(ctx, ref, imagePullConfig.OS); err != nil {
			// Was this pull cancelled? If so, don't try to fall
			// back.
			fallback := false
			select {
			case <-ctx.Done():
			default:
				if fallbackErr, ok := err.(fallbackError); ok {
					fallback = true
					confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
					if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
						confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
					}
					err = fallbackErr.err
				}
			}
			if fallback {
				if _, ok := err.(ErrNoSupport); !ok {
					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
					discardNoSupportErrors = true
					// append subsequent errors
					lastErr = err
				} else if !discardNoSupportErrors {
					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
					// were also ErrNoSupport errors.
					// append subsequent errors
					lastErr = err
				}
				logrus.Infof("Attempting next endpoint for pull after error: %v", err)
				continue
			}
			logrus.Errorf("Not continuing with pull after error: %v", err)
			return TranslatePullError(err, ref)
		}

		imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull")
		return nil
	}

	if lastErr == nil {
		lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref))
	}

	return TranslatePullError(lastErr, ref)
}
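
// A minimal usage sketch (illustrative only; the daemon wires this up with
// its real stores, auth configuration and registry service):
//
//	ref, _ := reference.ParseNormalizedNamed("library/alpine:latest")
//	err := Pull(ctx, ref, &ImagePullConfig{
//		Config: Config{ /* AuthConfig, stores, RegistryService, ... */ },
//	})
//
// Everything after ResolveRepository is endpoint iteration: v2 endpoints are
// preferred, and v1 is only consulted while no v2 registry has been
// confirmed for the repository.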

// writeStatus writes a status message to out. If layersDownloaded is true, the
// status message indicates that a newer image was downloaded. Otherwise, it
// indicates that the image is up to date. requestedTag is the tag the message
// will refer to.
func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) {
	if layersDownloaded {
		progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag)
	} else {
		progress.Message(out, "", "Status: Image is up to date for "+requestedTag)
	}
}

// ValidateRepoName validates the name of a repository.
func ValidateRepoName(name reference.Named) error {
	if reference.FamiliarName(name) == api.NoBaseImageSpecifier {
		return errors.WithStack(reservedNameError(api.NoBaseImageSpecifier))
	}
	return nil
}

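// addDigestReference records a digest-based reference for a newly pulled
// image so the content stays addressable by digest as well as by tag: for
// example (illustrative), pulling "docker.io/library/foo:latest" also records
// "docker.io/library/foo@sha256:<manifest digest>" against the same image ID.
// If a conflicting digest reference already exists it is left unchanged,
// because the reference store does not support updating digests.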
func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error {
	dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst)
	if err != nil {
		return err
	}

	if oldTagID, err := store.Get(dgstRef); err == nil {
		if oldTagID != id {
			// Updating digests not supported by reference store
			logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id)
		}
		return nil
	} else if err != refstore.ErrDoesNotExist {
		return err
	}

	return store.AddDigest(dgstRef, id, true)
}
367
vendor/github.com/docker/docker-ce/components/engine/distribution/pull_v1.go
generated
vendored
Normal file
@ -0,0 +1,367 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

type v1Puller struct {
	v1IDService *metadata.V1IDService
	endpoint    registry.APIEndpoint
	config      *ImagePullConfig
	repoInfo    *registry.RepositoryInfo
	session     *registry.Session
}

func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, os string) error {
	if _, isCanonical := ref.(reference.Canonical); isCanonical {
		// Allowing fallback, because HTTPS v1 is before HTTP v2
		return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
	}

	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was ReceiveTimeout
		registry.NewTransport(tlsConfig),
		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		logrus.Debugf("Fallback from error: %s", err)
		return fallbackError{err: err}
	}
	if err := p.pullRepository(ctx, ref); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return err
	}
	progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.")

	return nil
}

// Note use auth.Scope rather than reference.Named due to this warning causing Jenkins CI to fail:
// warning: ref can be github.com/docker/docker/vendor/github.com/docker/distribution/registry/client/auth.Scope (interfacer)
func (p *v1Puller) pullRepository(ctx context.Context, ref auth.Scope) error {
	progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name())

	tagged, isTagged := ref.(reference.NamedTagged)

	repoData, err := p.session.GetRepositoryData(p.repoInfo.Name)
	if err != nil {
		if strings.Contains(err.Error(), "HTTP code: 404") {
			if isTagged {
				return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag())
			}
			return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name))
		}
		// Unexpected HTTP error
		return err
	}

	logrus.Debug("Retrieving the tag list")
	var tagsList map[string]string
	if !isTagged {
		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name)
	} else {
		var tagID string
		tagsList = make(map[string]string)
		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag())
		if err == registry.ErrRepoNotFound {
			return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name())
		}
		tagsList[tagged.Tag()] = tagID
	}
	if err != nil {
		logrus.Errorf("unable to get remote tags: %s", err)
		return err
	}

	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	layersDownloaded := false
	for _, imgData := range repoData.ImgList {
		if isTagged && imgData.Tag != tagged.Tag() {
			continue
		}

		err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded)
		if err != nil {
			return err
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)
	return nil
}

func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error {
	if img.Tag == "" {
		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
		return nil
	}

	localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag)
	if err != nil {
		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
		logrus.Debug(retErr.Error())
		return retErr
	}

	if err := v1.ValidateID(img.ID); err != nil {
		return err
	}

	progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name())
	success := false
	var lastErr error
	for _, ep := range p.repoInfo.Index.Mirrors {
		ep += "v1/"
		progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep))
		if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
			// Don't report errors when pulling from mirrors.
			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
			continue
		}
		success = true
		break
	}
	if !success {
		for _, ep := range repoData.Endpoints {
			progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep)
			if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
				// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
				// As the error is also given to the output stream the user will see the error.
				lastErr = err
				progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
				continue
			}
			success = true
			break
		}
	}
	if !success {
		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr)
		progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
		return err
	}
	return nil
}

func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
	if err != nil {
		return err
	}
	if len(history) < 1 {
		return fmt.Errorf("empty history for image %s", v1ID)
	}
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")

	var (
		descriptors []xfer.DownloadDescriptor
		newHistory  []image.History
		imgJSON     []byte
		imgSize     int64
	)

	// Iterate over layers, in order from bottom-most to top-most. Download
	// config for all layers and create descriptors.
	for i := len(history) - 1; i >= 0; i-- {
		v1LayerID := history[i]
		imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint)
		if err != nil {
			return err
		}

		// Create a new-style config from the legacy configs
		h, err := v1.HistoryFromConfig(imgJSON, false)
		if err != nil {
			return err
		}
		newHistory = append(newHistory, h)

		layerDescriptor := &v1LayerDescriptor{
			v1LayerID:        v1LayerID,
			indexName:        p.repoInfo.Index.Name,
			endpoint:         endpoint,
			v1IDService:      p.v1IDService,
			layersDownloaded: layersDownloaded,
			layerSize:        imgSize,
			session:          p.session,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	rootFS := image.NewRootFS()
	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput)
	if err != nil {
		return err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory)
	if err != nil {
		return err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return err
	}

	if p.config.ReferenceStore != nil {
		if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
			return err
		}
	}

	return nil
}

func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) {
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata")

	retries := 5
	for j := 1; j <= retries; j++ {
		imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint)
		if err != nil && j == retries {
			progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata")
			return nil, 0, err
		} else if err != nil {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		}

		return imgJSON, imgSize, nil
	}

	// not reached
	return nil, 0, nil
}
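
// The retry loop above backs off linearly: attempt j sleeps j*500ms before
// the next try, i.e. 500ms, 1s, 1.5s and 2s between the five attempts, so a
// persistently failing endpoint costs roughly five seconds before the final
// error is surfaced.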

type v1LayerDescriptor struct {
	v1LayerID        string
	indexName        string
	endpoint         string
	v1IDService      *metadata.V1IDService
	layersDownloaded *bool
	layerSize        int64
	session          *registry.Session
	tmpFile          *os.File
}

func (ld *v1LayerDescriptor) Key() string {
	return "v1:" + ld.v1LayerID
}

func (ld *v1LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.v1LayerID)
}

func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.v1IDService.Get(ld.v1LayerID, ld.indexName)
}

func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	progress.Update(progressOutput, ld.ID(), "Pulling fs layer")
	layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize)
	if err != nil {
		progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers")
		if uerr, ok := err.(*url.Error); ok {
			err = uerr.Err
		}
		if terr, ok := err.(net.Error); ok && terr.Timeout() {
			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}
	*ld.layersDownloaded = true

	ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		layerReader.Close()
		return nil, 0, err
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
	defer reader.Close()

	_, err = io.Copy(ld.tmpFile, reader)
	if err != nil {
		ld.Close()
		return nil, 0, err
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

	ld.tmpFile.Seek(0, 0)

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	tmpFile := ld.tmpFile
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), ld.layerSize, nil
}

func (ld *v1LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
		ld.tmpFile = nil
	}
}

func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID)
}
941
vendor/github.com/docker/docker-ce/components/engine/distribution/pull_v2.go
generated
vendored
Normal file
@ -0,0 +1,941 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, os string) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref, os); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, os string) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, os)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, os)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}
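
// A name-only reference therefore behaves like pulling every tag the
// registry reports (the `docker pull --all-tags` path), and the status line
// written at the end reflects whether any tag downloaded new layers or newly
// tagged an existing image.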

type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}

func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
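
// Resume support in Download above relies on the blob reader returned by
// ld.open implementing io.Seeker: seeking to the end yields the total blob
// size for the progress bar, and seeking back to offset asks the registry
// for the byte range where the previous, interrupted attempt left off.
// Because the digest verifier only ever hashes the bytes of the current
// attempt, a verification failure after a resume truncates the temp file and
// retries from scratch instead of failing permanently.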

func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}

func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}

func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, os string) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, os)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
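
// Note the tagUpdated return value: pullV2Tag returns false not only on
// error, but also when the local reference store already maps this tag to
// the resolved image ID, which is how callers distinguish "Image is up to
// date" from "Downloaded newer image" in the final status message.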

func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain an OS. The history
	// entries do, but unfortunately only as an opaque string, so search
	// through the history until hopefully we find one which indicates the
	// OS. supertest2014/nyan is an example of a registry image with
	// schema v1.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// Bail out early if the requested OS doesn't match that of the configuration.
	// This avoids doing the download, only to potentially fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download and slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", "", err
		}
		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		// Bail out early if the requested OS doesn't match that of the configuration.
		// This avoids doing the download, only to potentially fail later.
		if !strings.EqualFold(configPlatform.OS, requestedOS) {
			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, requestedOS)
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, requestedOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}
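
// The concurrency in pullSchema2 is worth spelling out: the config blob and
// the layer blobs are fetched in parallel, with buffered channels
// (configChan, configErrChan, layerErrChan) carrying the first result or
// error from each goroutine and downloadsDone signalling layer completion.
// A config failure cancels the shared context so layer downloads stop early;
// a layer failure deliberately leaves the config fetch running, since its
// error, if there is one, is the more informative of the two.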

func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := s.PlatformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, os string) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), os, runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, os)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", os, runtime.GOARCH)
		logrus.Debugf(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	manifestDigest := manifestMatches[0].Digest

	if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil {
		return "", "", err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v, os)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v, os)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}

func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fall back to
// v1 (even if confirmedV2 has been set already), and if so, wraps the error
// in a fallbackError with confirmedV2 set to false. Otherwise, it returns
// the error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}

func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}

// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}

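// To illustrate fixManifestLayers: a schema1 manifest whose layers carry v1
// IDs [A, A, B] (top-most first) collapses to [A, B]. Only directly adjacent
// repeats are removed; [A, B, A] is instead rejected as a duplicate ID, and
// any entry whose recorded parent is not the next entry's ID fails the
// parent-chain check. TestFixManifestLayers in pull_v2_test.go exercises the
// adjacent-duplicate case.
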
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}
183
vendor/github.com/docker/docker-ce/components/engine/distribution/pull_v2_test.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"encoding/json"
	"io/ioutil"
	"reflect"
	"runtime"
	"strings"
	"testing"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/internal/testutil"
	"github.com/opencontainers/go-digest"
)

// TestFixManifestLayers checks that fixManifestLayers removes a duplicate
// layer, and that it makes no changes to the manifest when called a second
// time, after the duplicate is removed.
func TestFixManifestLayers(t *testing.T) {
	duplicateLayerManifest := schema1.Manifest{
		FSLayers: []schema1.FSLayer{
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		},
		History: []schema1.History{
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
		},
	}

	duplicateLayerManifestExpectedOutput := schema1.Manifest{
		FSLayers: []schema1.FSLayer{
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		},
		History: []schema1.History{
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
		},
	}

	if err := fixManifestLayers(&duplicateLayerManifest); err != nil {
		t.Fatalf("unexpected error from fixManifestLayers: %v", err)
	}

	if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) {
		t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest")
	}

	// Run fixManifestLayers again and confirm that it doesn't change the
	// manifest (which no longer has duplicate layers).
	if err := fixManifestLayers(&duplicateLayerManifest); err != nil {
		t.Fatalf("unexpected error from fixManifestLayers: %v", err)
	}

	if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) {
		t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)")
	}
}

// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails
// if the base layer configuration specifies a parent.
func TestFixManifestLayersBaseLayerParent(t *testing.T) {
	// TODO Windows: Fix this unit test
	if runtime.GOOS == "windows" {
		t.Skip("Needs fixing on Windows")
	}
	duplicateLayerManifest := schema1.Manifest{
		FSLayers: []schema1.FSLayer{
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		},
		History: []schema1.History{
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
		},
	}

	if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") {
		t.Fatalf("expected an invalid parent ID error from fixManifestLayers")
	}
}

// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails
// if an image configuration specifies a parent that doesn't directly follow
// that (deduplicated) image in the image history.
func TestFixManifestLayersBadParent(t *testing.T) {
	duplicateLayerManifest := schema1.Manifest{
		FSLayers: []schema1.FSLayer{
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		},
		History: []schema1.History{
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
		},
	}

	err := fixManifestLayers(&duplicateLayerManifest)
	testutil.ErrorContains(t, err, "invalid parent ID")
}

// TestValidateManifest verifies the validateManifest function
func TestValidateManifest(t *testing.T) {
	// TODO Windows: Fix this unit test
	if runtime.GOOS == "windows" {
		t.Skip("Needs fixing on Windows")
	}
	expectedDigest, err := reference.ParseNormalizedNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd")
	if err != nil {
		t.Fatal("could not parse reference")
	}
	expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

	// Good manifest

	goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest")
	if err != nil {
		t.Fatal("error reading fixture:", err)
	}

	var goodSignedManifest schema1.SignedManifest
	err = json.Unmarshal(goodManifestBytes, &goodSignedManifest)
	if err != nil {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest)
	if err != nil {
		t.Fatal("validateManifest failed:", err)
	}

	if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 {
		t.Fatal("unexpected FSLayer in good manifest")
	}

	// "Extra data" manifest

	extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest")
	if err != nil {
		t.Fatal("error reading fixture:", err)
	}

	var extraDataSignedManifest schema1.SignedManifest
	err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest)
	if err != nil {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest)
	if err != nil {
		t.Fatal("validateManifest failed:", err)
	}

	if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 {
		t.Fatal("unexpected FSLayer in extra data manifest")
	}

	// Bad manifest

	badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest")
	if err != nil {
		t.Fatal("error reading fixture:", err)
	}

	var badSignedManifest schema1.SignedManifest
	err = json.Unmarshal(badManifestBytes, &badSignedManifest)
	if err != nil {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest)
	if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") {
		t.Fatal("expected validateManifest to fail with digest error")
	}
}
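
The behavior these tests pin down is easy to state: consecutive history entries with identical V1Compatibility payloads describe the same layer, and only the first survives; running the fix a second time must change nothing. A minimal standalone sketch of that dedup rule (illustrative only, not the vendored fixManifestLayers implementation; compiles inside any Go package):

	// dedupeConsecutive keeps the first entry of each run of equal strings.
	// Applying it to its own output returns the input unchanged, which is
	// the idempotence TestFixManifestLayers asserts on its second pass.
	func dedupeConsecutive(history []string) []string {
		var out []string
		for i, h := range history {
			if i > 0 && h == history[i-1] {
				continue // duplicate of the previous entry; drop it
			}
			out = append(out, h)
		}
		return out
	}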
34
vendor/github.com/docker/docker-ce/components/engine/distribution/pull_v2_unix.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
// +build !windows

package distribution // import "github.com/docker/docker/distribution"

import (
	"runtime"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/sirupsen/logrus"
)

func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) {
	blobs := ld.repo.Blobs(ctx)
	return blobs.Open(ctx, ld.digest)
}

func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor {
	var matches []manifestlist.ManifestDescriptor
	for _, manifestDescriptor := range manifests {
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os {
			matches = append(matches, manifestDescriptor)

			logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
		}
	}
	return matches
}

// checkImageCompatibility is a Windows-specific function. No-op on Linux
func checkImageCompatibility(imageOS, imageOSVersion string) error {
	return nil
}
130
vendor/github.com/docker/docker-ce/components/engine/distribution/pull_v2_windows.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"errors"
	"fmt"
	"net/http"
	"os"
	"runtime"
	"sort"
	"strconv"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

var _ distribution.Describable = &v2LayerDescriptor{}

func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor {
	if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 {
		return ld.src
	}
	return distribution.Descriptor{}
}

func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) {
	blobs := ld.repo.Blobs(ctx)
	rsc, err := blobs.Open(ctx, ld.digest)

	if len(ld.src.URLs) == 0 {
		return rsc, err
	}

	// We're done if the registry has this blob.
	if err == nil {
		// Seek does an HTTP GET. If it succeeds, the blob really is accessible.
		if _, err = rsc.Seek(0, os.SEEK_SET); err == nil {
			return rsc, nil
		}
		rsc.Close()
	}

	// Find the first URL that results in a 200 result code.
	for _, url := range ld.src.URLs {
		logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url)
		rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil)

		// Seek does an HTTP GET. If it succeeds, the blob really is accessible.
		_, err = rsc.Seek(0, os.SEEK_SET)
		if err == nil {
			break
		}
		logrus.Debugf("Download for %v failed: %v", ld.digest, err)
		rsc.Close()
		rsc = nil
	}
	return rsc, err
}

func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor {
	osVersion := ""
	if os == "windows" {
		version := system.GetOSVersion()
		osVersion = fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build)
		logrus.Debugf("will prefer entries with version %s", osVersion)
	}

	var matches []manifestlist.ManifestDescriptor
	for _, manifestDescriptor := range manifests {
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os {
			matches = append(matches, manifestDescriptor)
			logrus.Debugf("found match for %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
		} else {
			logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
		}
	}
	if os == "windows" {
		sort.Stable(manifestsByVersion{osVersion, matches})
	}
	return matches
}

func versionMatch(actual, expected string) bool {
	// Check whether the version matches up to the build, ignoring UBR
	return strings.HasPrefix(actual, expected+".")
}

type manifestsByVersion struct {
	version string
	list    []manifestlist.ManifestDescriptor
}

func (mbv manifestsByVersion) Less(i, j int) bool {
	// TODO: Split version by parts and compare
	// TODO: Prefer versions which have a greater version number
	// Move compatible versions to the top, with no other ordering changes
	return versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version)
}

func (mbv manifestsByVersion) Len() int {
	return len(mbv.list)
}

func (mbv manifestsByVersion) Swap(i, j int) {
	mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i]
}

// checkImageCompatibility blocks pulling incompatible images based on a later OS build.
// Fixes https://github.com/moby/moby/issues/36184.
func checkImageCompatibility(imageOS, imageOSVersion string) error {
	if imageOS == "windows" {
		hostOSV := system.GetOSVersion()
		splitImageOSVersion := strings.Split(imageOSVersion, ".") // eg 10.0.16299.nnnn
		if len(splitImageOSVersion) >= 3 {
			if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil {
				if imageOSBuild > int(hostOSV.Build) {
					errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString())
					logrus.Debugf(errMsg)
					return errors.New(errMsg)
				}
			}
		}
	}
	return nil
}
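
versionMatch and manifestsByVersion together encode a "prefer the host's build" policy: an entry sorts to the front only when its OSVersion begins with the host's major.minor.build prefix, with the UBR (the fourth component) ignored. A worked example of the predicate, using made-up version strings:

	versionMatch("10.0.16299.125", "10.0.16299") // true: matches up to the build
	versionMatch("10.0.17134.48", "10.0.16299")  // false: different build
	versionMatch("10.0.162991.5", "10.0.16299")  // false: the appended "." prevents prefix collisions between builds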
186
vendor/github.com/docker/docker-ce/components/engine/distribution/push.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/registry"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

// Pusher is an interface that abstracts pushing for different API versions.
type Pusher interface {
	// Push tries to push the image configured at the creation of Pusher.
	// Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint.
	//
	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
	Push(ctx context.Context) error
}

const compressionBufSize = 32768

// NewPusher creates a new Pusher interface that will push to either a v1 or v2
// registry. The endpoint argument contains a Version field that determines
// whether a v1 or v2 pusher will be created. The other parameters are passed
// through to the underlying pusher implementation for use during the actual
// push operation.
func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) {
	switch endpoint.Version {
	case registry.APIVersion2:
		return &v2Pusher{
			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
			ref:               ref,
			endpoint:          endpoint,
			repoInfo:          repoInfo,
			config:            imagePushConfig,
		}, nil
	case registry.APIVersion1:
		return &v1Pusher{
			v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore),
			ref:         ref,
			endpoint:    endpoint,
			repoInfo:    repoInfo,
			config:      imagePushConfig,
		}, nil
	}
	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}
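
// A typical call site pairs NewPusher with an immediate Push and moves on to
// the next endpoint on failure; the real loop lives in Push below. Sketch
// only, with the surrounding variables assumed in scope:
//
//	pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig)
//	if err == nil {
//		err = pusher.Push(ctx)
//	}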

// Push initiates a push operation on ref.
// ref is the specific variant of the image to be pushed.
// If no tag is provided, all tags will be pushed.
func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error {
	// FIXME: Allow to interrupt current push when new push of same image is done.

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref)
	if err != nil {
		return err
	}

	endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
	if err != nil {
		return err
	}

	progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name())

	associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name)
	if len(associations) == 0 {
		return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name))
	}

	var (
		lastErr error

		// confirmedV2 is set to true if a push attempt managed to
		// confirm that it was talking to a v2 registry. This will
		// prevent fallback to the v1 protocol.
		confirmedV2 bool

		// confirmedTLSRegistries is a map indicating which registries
		// are known to be using TLS. There should never be a plaintext
		// retry for any of these.
		confirmedTLSRegistries = make(map[string]struct{})
	)

	for _, endpoint := range endpoints {
		if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
			continue
		}
		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}

		if endpoint.URL.Scheme != "https" {
			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
				logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
				continue
			}
		}

		logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version)

		pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig)
		if err != nil {
			lastErr = err
			continue
		}
		if err := pusher.Push(ctx); err != nil {
			// Was this push cancelled? If so, don't try to fall
			// back.
			select {
			case <-ctx.Done():
			default:
				if fallbackErr, ok := err.(fallbackError); ok {
					confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
					if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
						confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
					}
					err = fallbackErr.err
					lastErr = err
					logrus.Infof("Attempting next endpoint for push after error: %v", err)
					continue
				}
			}

			logrus.Errorf("Not continuing with push after error: %v", err)
			return err
		}

		imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push")
		return nil
	}

	if lastErr == nil {
		lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name())
	}
	return lastErr
}
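
// The loop above boils down to a small discipline: try endpoints in priority
// order; once a v2 registry is confirmed, never fall back to v1; once a host
// has answered over TLS, never retry it in plaintext; any non-fallback error
// aborts immediately. A stripped-down skeleton of that shape (shouldSkip,
// tryPush and isFallback are illustrative placeholders, not package code):
//
//	for _, ep := range endpoints {
//		if shouldSkip(ep) {
//			continue
//		}
//		if err := tryPush(ep); err == nil {
//			return nil
//		} else if !isFallback(err) {
//			return err
//		}
//	}
//	return lastErr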

// compress returns an io.ReadCloser which will supply a compressed version of
// the provided Reader. The caller must close the ReadCloser after reading the
// compressed data.
//
// Note that this function returns a reader instead of taking a writer as an
// argument so that it can be used with httpBlobWriter's ReadFrom method.
// Using httpBlobWriter's Write method would send a PATCH request for every
// Write call.
//
// The second return value is a channel that gets closed when the goroutine
// is finished. This allows the caller to make sure the goroutine finishes
// before it releases any resources connected with the reader that was
// passed in.
func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	compressionDone := make(chan struct{})

	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, in)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
		close(compressionDone)
	}()

	return pipeReader, compressionDone
}
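
The contract documented on compress puts two duties on the caller: close the returned reader, and wait on the done channel before releasing the input stream. A minimal usage sketch (layerTar and blobWriter are assumed stand-ins, not names from this package):

	compressed, done := compress(layerTar)
	_, err := io.Copy(blobWriter, compressed)
	compressed.Close() // unblocks the compressor goroutine if the copy stopped early
	<-done             // goroutine has exited; layerTar may safely be closed now
	if err != nil {
		// handle the upload/compression error
	}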
457
vendor/github.com/docker/docker-ce/components/engine/distribution/push_v1.go
generated
vendored
Normal file
@@ -0,0 +1,457 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"fmt"
	"sync"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

type v1Pusher struct {
	v1IDService *metadata.V1IDService
	endpoint    registry.APIEndpoint
	ref         reference.Named
	repoInfo    *registry.RepositoryInfo
	config      *ImagePushConfig
	session     *registry.Session
}

func (p *v1Pusher) Push(ctx context.Context) error {
	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was NoTimeout
		registry.NewTransport(tlsConfig),
		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		return fallbackError{err: err}
	}
	if err := p.pushRepository(ctx); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return err
	}
	return nil
}

// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
// image being pushed to a v1 registry.
type v1Image interface {
	Config() []byte
	Layer() layer.Layer
	V1ID() string
}

type v1ImageCommon struct {
	layer  layer.Layer
	config []byte
	v1ID   string
}

func (common *v1ImageCommon) Config() []byte {
	return common.config
}

func (common *v1ImageCommon) V1ID() string {
	return common.v1ID
}

func (common *v1ImageCommon) Layer() layer.Layer {
	return common.layer
}

// v1TopImage defines a runnable (top layer) image being pushed to a v1
// registry.
type v1TopImage struct {
	v1ImageCommon
	imageID image.ID
}

func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
	v1ID := imageID.Digest().Hex()
	parentV1ID := ""
	if parent != nil {
		parentV1ID = parent.V1ID()
	}

	config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
	if err != nil {
		return nil, err
	}

	return &v1TopImage{
		v1ImageCommon: v1ImageCommon{
			v1ID:   v1ID,
			config: config,
			layer:  l,
		},
		imageID: imageID,
	}, nil
}

// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
type v1DependencyImage struct {
	v1ImageCommon
}

func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage {
	v1ID := digest.Digest(l.ChainID()).Hex()

	var config string
	if parent != nil {
		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
	} else {
		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
	}
	return &v1DependencyImage{
		v1ImageCommon: v1ImageCommon{
			v1ID:   v1ID,
			config: []byte(config),
			layer:  l,
		},
	}
}

// Retrieve all the images to be uploaded in the correct order
func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) {
	tagsByImage = make(map[image.ID][]string)

	// Ignore digest references
	if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
		return
	}

	tagged, isTagged := p.ref.(reference.NamedTagged)
	if isTagged {
		// Push a specific tag
		var imgID image.ID
		var dgst digest.Digest
		dgst, err = p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return
		}
		imgID = image.IDFromDigest(dgst)

		imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
		if err != nil {
			return
		}

		tagsByImage[imgID] = []string{tagged.Tag()}

		return
	}

	imagesSeen := make(map[digest.Digest]struct{})
	dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)

	associations := p.config.ReferenceStore.ReferencesByName(p.ref)
	for _, association := range associations {
		if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
			// Ignore digest references.
			continue
		}

		imgID := image.IDFromDigest(association.ID)
		tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag())

		if _, present := imagesSeen[association.ID]; present {
			// Skip generating image list for already-seen image
			continue
		}
		imagesSeen[association.ID] = struct{}{}

		imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers)
		if err != nil {
			return nil, nil, nil, err
		}

		// append to main image list
		imageList = append(imageList, imageListForThisTag...)
	}
	if len(imageList) == 0 {
		return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
	}
	logrus.Debugf("Image list: %v", imageList)
	logrus.Debugf("Tags by image: %v", tagsByImage)

	return
}

func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) {
	ics, ok := p.config.ImageStore.(*imageConfigStore)
	if !ok {
		return nil, fmt.Errorf("only image store images supported for v1 push")
	}
	img, err := ics.Store.Get(imgID)
	if err != nil {
		return nil, err
	}

	topLayerID := img.RootFS.ChainID()

	if !system.IsOSSupported(img.OperatingSystem()) {
		return nil, system.ErrNotSupportedOperatingSystem
	}
	pl, err := p.config.LayerStores[img.OperatingSystem()].Get(topLayerID)
	*referencedLayers = append(*referencedLayers, pl)
	if err != nil {
		return nil, fmt.Errorf("failed to get top layer from image: %v", err)
	}

	// V1 push is deprecated, only support existing layerstore layers
	lsl, ok := pl.(*storeLayer)
	if !ok {
		return nil, fmt.Errorf("only layer store layers supported for v1 push")
	}
	l := lsl.Layer

	dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen)

	topImage, err := newV1TopImage(imgID, img, l, parent)
	if err != nil {
		return nil, err
	}

	imageListForThisTag = append(dependencyImages, topImage)

	return
}

func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) {
	if l == nil {
		return nil, nil
	}

	imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen)

	if dependenciesSeen != nil {
		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
			// This layer is already on the list, we can ignore it
			// and all its parents.
			return imageListForThisTag, dependencyImage
		}
	}

	dependencyImage := newV1DependencyImage(l, parent)
	imageListForThisTag = append(imageListForThisTag, dependencyImage)

	if dependenciesSeen != nil {
		dependenciesSeen[l.ChainID()] = dependencyImage
	}

	return imageListForThisTag, dependencyImage
}

// createImageIndex returns an index of an image's layer IDs and tags.
func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
	var imageIndex []*registry.ImgData
	for _, img := range images {
		v1ID := img.V1ID()

		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
			if tags, hasTags := tags[topImage.imageID]; hasTags {
				// If an image has tags you must add an entry in the image index
				// for each tag
				for _, tag := range tags {
					imageIndex = append(imageIndex, &registry.ImgData{
						ID:  v1ID,
						Tag: tag,
					})
				}
				continue
			}
		}

		// If the image does not have a tag it still needs to be sent to the
		// registry with an empty tag so that it is associated with the repository
		imageIndex = append(imageIndex, &registry.ImgData{
			ID:  v1ID,
			Tag: "",
		})
	}
	return imageIndex
}
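
// For illustration, with hypothetical IDs: a two-layer image whose top image
// carries tags "latest" and "v1" yields one untagged entry for the dependency
// layer and one entry per tag for the top image:
//
//	[]*registry.ImgData{
//		{ID: "<dependency v1ID>", Tag: ""},
//		{ID: "<top v1ID>", Tag: "latest"},
//		{ID: "<top v1ID>", Tag: "v1"},
//	}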

// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and if it is absent then it sends the image id to the channel to be pushed.
func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
	defer wg.Done()
	for image := range images {
		v1ID := image.V1ID()
		truncID := stringid.TruncateID(image.Layer().DiffID().String())
		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
			logrus.Errorf("Error in LookupRemoteImage: %s", err)
			imagesToPush <- v1ID
			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
		} else {
			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
		}
	}
}

func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
	workerCount := len(imageList)
	// start a maximum of 5 workers to check if images exist on the specified endpoint.
	if workerCount > 5 {
		workerCount = 5
	}
	var (
		wg           = &sync.WaitGroup{}
		imageData    = make(chan v1Image, workerCount*2)
		imagesToPush = make(chan string, workerCount*2)
		pushes       = make(chan map[string]struct{}, 1)
	)
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
	}
	// start a go routine that consumes the images to push
	go func() {
		shouldPush := make(map[string]struct{})
		for id := range imagesToPush {
			shouldPush[id] = struct{}{}
		}
		pushes <- shouldPush
	}()
	for _, v1Image := range imageList {
		imageData <- v1Image
	}
	// close the channel to notify the workers that there will be no more images to check.
	close(imageData)
	wg.Wait()
	close(imagesToPush)
	// wait for all the images that require pushes to be collected into a consumable map.
	shouldPush := <-pushes
	// finish by pushing any images and tags to the endpoint. The order that the images are
	// pushed in is very important, which is why we are still iterating over the ordered list
	// of imageIDs.
	for _, img := range imageList {
		v1ID := img.V1ID()
		if _, push := shouldPush[v1ID]; push {
			if _, err := p.pushImage(ctx, img, endpoint); err != nil {
				// FIXME: Continue on error?
				return err
			}
		}
		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
			for _, tag := range tags[topImage.imageID] {
				progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag)
				if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// pushRepository pushes layers that do not already exist on the registry.
func (p *v1Pusher) pushRepository(ctx context.Context) error {
	imgList, tags, referencedLayers, err := p.getImageList()
	defer func() {
		for _, l := range referencedLayers {
			l.Release()
		}
	}()
	if err != nil {
		return err
	}

	imageIndex := createImageIndex(imgList, tags)
	for _, data := range imageIndex {
		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
			return err
		}
	}
	_, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints)
	return err
}

func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
	l := v1Image.Layer()
	v1ID := v1Image.V1ID()
	truncID := stringid.TruncateID(l.DiffID().String())

	jsonRaw := v1Image.Config()
	progress.Update(p.config.ProgressOutput, truncID, "Pushing")

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: v1ID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
			return "", nil
		}
		return "", err
	}

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
	defer reader.Close()

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
		logrus.Warnf("Could not set v1 ID mapping: %v", err)
	}

	progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
	return imgData.Checksum, nil
}
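
pushImageToEndpoint above caps its existence checks at five concurrent workers feeding a shared result channel, then pushes only what the checks flagged. The same shape in miniature (a self-contained sketch assuming a "sync" import; the names are illustrative, not from this package):

	// checkAll fans work out to at most max goroutines (max >= 1) and
	// collects the items that fail the check, mirroring the lookup/push
	// split above.
	func checkAll(items []string, max int, check func(string) bool) map[string]struct{} {
		var wg sync.WaitGroup
		in := make(chan string, max*2)
		out := make(chan string, max*2)
		workers := max
		if len(items) < workers {
			workers = len(items)
		}
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for it := range in {
					if !check(it) {
						out <- it // absent remotely; needs a push
					}
				}
			}()
		}
		result := make(chan map[string]struct{}, 1)
		go func() {
			need := make(map[string]struct{})
			for it := range out {
				need[it] = struct{}{}
			}
			result <- need
		}()
		for _, it := range items {
			in <- it
		}
		close(in)
		wg.Wait()  // all workers done; no more writes to out
		close(out) // lets the collector goroutine finish
		return <-result
	}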
692
vendor/github.com/docker/docker-ce/components/engine/distribution/push_v2.go
generated
vendored
Normal file
@@ -0,0 +1,692 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"errors"
	"fmt"
	"io"
	"runtime"
	"sort"
	"strings"
	"sync"

	"golang.org/x/net/context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

const (
	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
)

type v2Pusher struct {
	v2MetadataService metadata.V2MetadataService
	ref               reference.Named
	endpoint          registry.APIEndpoint
	repoInfo          *registry.RepositoryInfo
	config            *ImagePushConfig
	repo              distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.pushState.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}

func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
		}

		return p.pushV2Tag(ctx, namedTagged, imageID)
	}

	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Push all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
	}

	return nil
}

func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
	logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))

	imgConfig, err := p.config.ImageStore.Get(id)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
	}

	rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
	if err != nil {
		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
	}

	platform, err := p.config.ImageStore.PlatformFromConfig(imgConfig)
	if err != nil {
		return fmt.Errorf("unable to get platform for image %s: %s", reference.FamiliarString(ref), err)
	}

	l, err := p.config.LayerStores[platform.OS].Get(rootfs.ChainID())
	if err != nil {
		return fmt.Errorf("failed to get top layer from image: %v", err)
	}
	defer l.Release()

	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
	if err != nil {
		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		v2MetadataService: p.v2MetadataService,
		hmacKey:           hmacKey,
		repoInfo:          p.repoInfo.Name,
		ref:               p.ref,
		endpoint:          p.endpoint,
		repo:              p.repo,
		pushState:         &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for range rootfs.DiffIDs {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptor.checkedDigests = make(map[digest.Digest]struct{})
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
			logrus.Warnf("failed to upload schema2 manifest: %v", err)
			return err
		}

		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
		if err != nil {
			return err
		}
		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))

	if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
		return err
	}

	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)})

	return nil
}

func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

type v2PushDescriptor struct {
	layer             PushLayer
	v2MetadataService metadata.V2MetadataService
	hmacKey           []byte
	repoInfo          reference.Named
	ref               reference.Named
	endpoint          registry.APIEndpoint
	repo              distribution.Repository
	pushState         *pushState
	remoteDescriptor  distribution.Descriptor
	// a set of digests whose presence has been checked in a target repository
	checkedDigests map[digest.Digest]struct{}
}

func (pd *v2PushDescriptor) Key() string {
	return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String()
}

func (pd *v2PushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}

func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	// Skip foreign layers unless this registry allows nondistributable artifacts.
	if !pd.endpoint.AllowNondistributableArtifacts {
		if fs, ok := pd.layer.(distribution.Describable); ok {
			if d := fs.Descriptor(); len(d.URLs) > 0 {
				progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
				return d, nil
			}
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		// check for blob existence in the target repository
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
	for _, mountCandidate := range candidates {
		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
		createOpts := []distribution.BlobCreateOption{}

		if len(mountCandidate.SourceRepository) > 0 {
			namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
			if err != nil {
				logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err)
				pd.v2MetadataService.Remove(mountCandidate)
				continue
			}

			// Candidates are always under same domain, create remote reference
			// with only path to set mount from with
			remoteRef, err := reference.WithName(reference.Path(namedRef))
			if err != nil {
				logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err)
				continue
			}

			canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
			if err != nil {
				logrus.Errorf("failed to make canonical reference: %v", err)
				continue
			}

			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
		}

		// send the layer
		lu, err := bs.Create(ctx, createOpts...)
		switch err := err.(type) {
		case nil:
			// noop
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.confirmedV2 = true
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
				Digest:           err.Descriptor.Digest,
				SourceRepository: pd.repoInfo.Name(),
			}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		default:
			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
		}

		if len(mountCandidate.SourceRepository) > 0 &&
			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
				len(mountCandidate.HMAC) == 0) {
			cause := "blob mount failure"
			if err != nil {
				cause = fmt.Sprintf("an error: %v", err.Error())
			}
			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
			pd.v2MetadataService.Remove(mountCandidate)
		}

		if lu != nil {
			// cancel previous upload
			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
			layerUpload = lu
		}
	}

	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
		// do additional layer existence checks with other known digests if any
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)
	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()

	// upload the blob
	return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
}

func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
	pd.remoteDescriptor = descriptor
}

func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	return pd.remoteDescriptor
}

func (pd *v2PushDescriptor) uploadUsingSession(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
	var reader io.ReadCloser

	contentReader, err := pd.layer.Open()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	size, _ := pd.layer.Size()

	reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing")

	switch m := pd.layer.MediaType(); m {
	case schema2.MediaTypeUncompressedLayer:
		compressedReader, compressionDone := compress(reader)
		defer func(closer io.Closer) {
			closer.Close()
			<-compressionDone
		}(reader)
		reader = compressedReader
	case schema2.MediaTypeLayer:
	default:
		reader.Close()
		return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m)
	}

	digester := digest.Canonical.Digester()
	tee := io.TeeReader(reader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	reader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
		Digest:           pushDigest,
		SourceRepository: pd.repoInfo.Name(),
	}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	desc := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Lock()
	// If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol.
	pd.pushState.confirmedV2 = true
	pd.pushState.remoteLayers[diffID] = desc
	pd.pushState.Unlock()

	return desc, nil
}

// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata"
// slice. If it finds one that the registry knows about, it returns the known digest and "true". If
// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository
// (not just the target one).
func (pd *v2PushDescriptor) layerAlreadyExists(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	checkOtherRepositories bool,
	maxExistenceCheckAttempts int,
	v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
	// filter the metadata
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}
	// sort the candidates by similarity
	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)

	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
	// an array of unique blob digests ordered from the best mount candidates to worst
	layerDigests := []digest.Digest{}
	for i := 0; i < len(candidates); i++ {
		if len(layerDigests) >= maxExistenceCheckAttempts {
			break
		}
		meta := &candidates[i]
		if _, exists := digestToMetadata[meta.Digest]; exists {
			// keep reference just to the first mapping (the best mount candidate)
			continue
		}
		if _, exists := pd.checkedDigests[meta.Digest]; exists {
			// existence of this digest has already been tested
			continue
		}
		digestToMetadata[meta.Digest] = meta
		layerDigests = append(layerDigests, meta.Digest)
	}

attempts:
	for _, dgst := range layerDigests {
		meta := digestToMetadata[dgst]
		logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
		pd.checkedDigests[meta.Digest] = struct{}{}
		switch err {
		case nil:
			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
				// cache mapping from this layer's DiffID to the blobsum
				if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
					Digest:           desc.Digest,
					SourceRepository: pd.repoInfo.Name(),
				}); err != nil {
					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
				}
			}
			desc.MediaType = schema2.MediaTypeLayer
			exists = true
			break attempts
		case distribution.ErrBlobUnknown:
			if meta.SourceRepository == pd.repoInfo.Name() {
				// remove the mapping to the target repository
				pd.v2MetadataService.Remove(*meta)
			}
		default:
			logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name())
		}
	}

	if exists {
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		pd.pushState.Lock()
		pd.pushState.remoteLayers[diffID] = desc
		pd.pushState.Unlock()
	}

	return desc, exists, nil
}

// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross repository mount attempts from
// source repositories of the target registry, the maximum number of layer existence checks performed on the
// target repository, and whether the check shall be done also with digests mapped to different repositories.
// The decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the
// cost of an upload does not outweigh the latency of the extra checks.
func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size, err := layer.Size()
	switch {
	// big blob
	case size > middleLayerMaximumSize:
		// 1st attempt to mount the blob few times
		// 2nd few existence checks with digests associated to any repository
		// then fallback to upload
		return 4, 3, true

	// middle sized blobs; if we could not get the size, assume we deal with middle sized blob
	case size > smallLayerMaximumSize, err != nil:
		// 1st attempt to mount blobs of average size few times
		// 2nd try at most 1 existence check if there's an existing mapping to the target repository
		// then fallback to upload
		return 3, 1, false

	// small blobs, do a minimum number of checks
	default:
		return 1, 1, false
	}
}
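
As a rough aid to reading the size thresholds above, here is a minimal standalone sketch (not part of the vendored file; the helper name sizeClass is invented, and the err-from-Size case is folded into the middle class for brevity) showing how layer size maps to the attempt budgets:

package main

import "fmt"

const (
	smallLayerMaximumSize  = 100 * (1 << 10) // 100KB
	middleLayerMaximumSize = 10 * (1 << 20)  // 10MB
)

// sizeClass mirrors the switch in getMaxMountAndExistenceCheckAttempts:
// bigger layers justify more mount attempts and existence checks before
// falling back to a plain upload.
func sizeClass(size int64) (mounts, checks int, otherRepos bool) {
	switch {
	case size > middleLayerMaximumSize:
		return 4, 3, true
	case size > smallLayerMaximumSize:
		return 3, 1, false
	default:
		return 1, 1, false
	}
}

func main() {
	for _, size := range []int64{50 << 10, 5 << 20, 50 << 20} {
		m, c, o := sizeClass(size)
		fmt.Printf("%9d bytes -> %d mount attempts, %d existence checks, other repos: %v\n", size, m, c, o)
	}
}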

// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
// array is sorted from youngest to oldest. Only entries whose SourceRepository has the same registry part
// as repoInfo are considered, and the target repository itself is excluded.
func getRepositoryMountCandidates(
	repoInfo reference.Named,
	hmacKey []byte,
	max int,
	v2Metadata []metadata.V2Metadata,
) []metadata.V2Metadata {
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
			continue
		}
		// target repository is not a viable candidate
		if meta.SourceRepository == repoInfo.Name() {
			continue
		}
		candidates = append(candidates, meta)
	}

	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
	if max >= 0 && len(candidates) > max {
		// select the youngest metadata
		candidates = candidates[:max]
	}

	return candidates
}

// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
// candidate "a" is preferred over "b":
//
//  1. if it was hashed using the same AuthConfig as the one used to authenticate to the target
//     repository and "b" was not
//  2. if the number of its repository path components that exactly match path components of the
//     target repository is higher
type byLikeness struct {
	arr            []metadata.V2Metadata
	hmacKey        []byte
	pathComponents []string
}

func (bla byLikeness) Less(i, j int) bool {
	aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey)
	bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey)
	if aMacMatch != bMacMatch {
		return aMacMatch
	}
	aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents)
	bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents)
	return aMatch > bMatch
}
func (bla byLikeness) Swap(i, j int) {
	bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i]
}
func (bla byLikeness) Len() int { return len(bla.arr) }

// nolint: interfacer
func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) {
	// reverse the metadata array to shift the newest entries to the beginning
	for i := 0; i < len(marr)/2; i++ {
		marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i]
	}
	// keep equal entries ordered from the youngest to the oldest
	sort.Stable(byLikeness{
		arr:            marr,
		hmacKey:        hmacKey,
		pathComponents: getPathComponents(repoInfo.Name()),
	})
}

// numOfMatchingPathComponents returns the number of leading path components in "pth" that exactly
// match "matchComponents".
func numOfMatchingPathComponents(pth string, matchComponents []string) int {
	pthComponents := getPathComponents(pth)
	i := 0
	for ; i < len(pthComponents) && i < len(matchComponents); i++ {
		if matchComponents[i] != pthComponents[i] {
			return i
		}
	}
	return i
}

func getPathComponents(path string) []string {
	return strings.Split(path, "/")
}
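
To make the likeness ordering concrete, a small self-contained sketch (illustrative only; numOfMatchingLeadingComponents is an invented stand-in for the unexported numOfMatchingPathComponents) of how candidate repositories rank against a target of 127.0.0.1/foo/bar:

package main

import (
	"fmt"
	"strings"
)

// numOfMatchingLeadingComponents counts the leading "/"-separated components
// of pth that equal the corresponding components of target, which is the
// tie-breaker byLikeness uses after the HMAC comparison.
func numOfMatchingLeadingComponents(pth, target string) int {
	a := strings.Split(pth, "/")
	b := strings.Split(target, "/")
	i := 0
	for ; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return i
}

func main() {
	target := "127.0.0.1/foo/bar"
	for _, repo := range []string{"127.0.0.1/foo/app", "127.0.0.1/foo", "127.0.0.1/bar", "docker.io/library/foo"} {
		// foo/app and foo match 2 components, bar matches 1, docker.io matches 0,
		// so closer repositories are tried as mount sources first.
		fmt.Printf("%-22s matches %d component(s) of %s\n", repo, numOfMatchingLeadingComponents(repo, target), target)
	}
}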

func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) {
	if layerUpload != nil {
		logrus.Debugf("cancelling upload of blob %s", dgst)
		err := layerUpload.Cancel(ctx)
		if err != nil {
			logrus.Warnf("failed to cancel upload: %v", err)
		}
	}
}
583
vendor/github.com/docker/docker-ce/components/engine/distribution/push_v2_test.go
generated
vendored
Normal file
@@ -0,0 +1,583 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"net/http"
	"reflect"
	"testing"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/opencontainers/go-digest"
)

func TestGetRepositoryMountCandidates(t *testing.T) {
	for _, tc := range []struct {
		name          string
		hmacKey       string
		targetRepo    string
		maxCandidates int
		metadata      []metadata.V2Metadata
		candidates    []metadata.V2Metadata
	}{
		{
			name:          "empty metadata",
			targetRepo:    "busybox",
			maxCandidates: -1,
			metadata:      []metadata.V2Metadata{},
			candidates:    []metadata.V2Metadata{},
		},
		{
			name:          "one item not matching",
			targetRepo:    "busybox",
			maxCandidates: -1,
			metadata:      []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")},
			candidates:    []metadata.V2Metadata{},
		},
		{
			name:          "one item matching",
			targetRepo:    "busybox",
			maxCandidates: -1,
			metadata:      []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")},
			candidates:    []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")},
		},
		{
			name:          "allow missing SourceRepository",
			targetRepo:    "busybox",
			maxCandidates: -1,
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("1")},
				{Digest: digest.Digest("3")},
				{Digest: digest.Digest("2")},
			},
			candidates: []metadata.V2Metadata{},
		},
		{
			name:          "handle docker.io",
			targetRepo:    "user/app",
			maxCandidates: -1,
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"},
				{Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"},
				{Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"},
			},
			candidates: []metadata.V2Metadata{
				{Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"},
				{Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"},
				{Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"},
			},
		},
		{
			name:          "sort more items",
			hmacKey:       "abcd",
			targetRepo:    "127.0.0.1/foo/bar",
			maxCandidates: -1,
			metadata: []metadata.V2Metadata{
				taggedMetadata("hash", "1", "docker.io/library/hello-world"),
				taggedMetadata("efgh", "2", "127.0.0.1/hello-world"),
				taggedMetadata("abcd", "3", "docker.io/library/busybox"),
				taggedMetadata("hash", "4", "docker.io/library/busybox"),
				taggedMetadata("hash", "5", "127.0.0.1/foo"),
				taggedMetadata("hash", "6", "127.0.0.1/bar"),
				taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"),
				taggedMetadata("abcd", "8", "127.0.0.1/xyz"),
				taggedMetadata("hash", "9", "127.0.0.1/foo/app"),
			},
			candidates: []metadata.V2Metadata{
				// first by matching hash
				taggedMetadata("abcd", "8", "127.0.0.1/xyz"),
				// then by longest matching prefix
				taggedMetadata("hash", "9", "127.0.0.1/foo/app"),
				taggedMetadata("hash", "5", "127.0.0.1/foo"),
				// sort the rest of the matching items in reversed order
				taggedMetadata("hash", "6", "127.0.0.1/bar"),
				taggedMetadata("efgh", "2", "127.0.0.1/hello-world"),
			},
		},
		{
			name:          "limit max candidates",
			hmacKey:       "abcd",
			targetRepo:    "user/app",
			maxCandidates: 3,
			metadata: []metadata.V2Metadata{
				taggedMetadata("abcd", "1", "docker.io/user/app1"),
				taggedMetadata("abcd", "2", "docker.io/user/app/base"),
				taggedMetadata("hash", "3", "docker.io/user/app"),
				taggedMetadata("abcd", "4", "127.0.0.1/user/app"),
				taggedMetadata("hash", "5", "docker.io/user/foo"),
				taggedMetadata("hash", "6", "docker.io/app/bar"),
			},
			candidates: []metadata.V2Metadata{
				// first by matching hash
				taggedMetadata("abcd", "2", "docker.io/user/app/base"),
				taggedMetadata("abcd", "1", "docker.io/user/app1"),
				// then by longest matching prefix
				// "docker.io/user/app" is excluded since candidates must
				// be from a different repository
				taggedMetadata("hash", "5", "docker.io/user/foo"),
			},
		},
	} {
		repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo)
		if err != nil {
			t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err)
		}
		candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata)
		if len(candidates) != len(tc.candidates) {
			t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates))
		}
		for i := 0; i < len(candidates) && i < len(tc.candidates); i++ {
			if !reflect.DeepEqual(candidates[i], tc.candidates[i]) {
				t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i])
			}
		}
		for i := len(candidates); i < len(tc.candidates); i++ {
			t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i])
		}
		for i := len(tc.candidates); i < len(candidates); i++ {
			t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i])
		}
	}
}

func TestLayerAlreadyExists(t *testing.T) {
	for _, tc := range []struct {
		name                   string
		metadata               []metadata.V2Metadata
		targetRepo             string
		hmacKey                string
		maxExistenceChecks     int
		checkOtherRepositories bool
		remoteBlobs            map[digest.Digest]distribution.Descriptor
		remoteErrors           map[digest.Digest]error
		expectedDescriptor     distribution.Descriptor
		expectedExists         bool
		expectedError          error
		expectedRequests       []string
		expectedAdditions      []metadata.V2Metadata
		expectedRemovals       []metadata.V2Metadata
	}{
		{
			name:                   "empty metadata",
			targetRepo:             "busybox",
			maxExistenceChecks:     3,
			checkOtherRepositories: true,
		},
		{
			name:               "single not existent metadata",
			targetRepo:         "busybox",
			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}},
			maxExistenceChecks: 3,
			expectedRequests:   []string{"pear"},
			expectedRemovals:   []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}},
		},
		{
			name:               "access denied",
			targetRepo:         "busybox",
			maxExistenceChecks: 1,
			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
			remoteErrors:       map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied},
			expectedError:      nil,
			expectedRequests:   []string{"apple"},
		},
		{
			name:               "not matching repositories",
			targetRepo:         "busybox",
			maxExistenceChecks: 3,
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"},
				{Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"},
				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"},
				{Digest: digest.Digest("plum"), SourceRepository: "busybox"},
				{Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"},
			},
		},
		{
			name:                   "check other repositories",
			targetRepo:             "busybox",
			maxExistenceChecks:     10,
			checkOtherRepositories: true,
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"},
				{Digest: digest.Digest("orange"), SourceRepository: "docker.io/busybox/subapp"},
				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"},
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"},
				{Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"},
			},
			expectedRequests: []string{"plum", "apple", "pear", "orange", "banana"},
			expectedRemovals: []metadata.V2Metadata{
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"},
			},
		},
		{
			name:               "find existing blob",
			targetRepo:         "busybox",
			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}},
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"apple"},
		},
		{
			name:               "find existing blob with different hmac",
			targetRepo:         "busybox",
			metadata:           []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}},
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}},
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"apple"},
			expectedAdditions:  []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
		},
		{
			name:               "overwrite media types",
			targetRepo:         "busybox",
			metadata:           []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}},
			hmacKey:            "key",
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}},
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"apple"},
			expectedAdditions:  []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")},
		},
		{
			name:       "find existing blob among many",
			targetRepo: "127.0.0.1/myapp",
			hmacKey:    "key",
			metadata: []metadata.V2Metadata{
				taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"),
				taggedMetadata("key", "apple", "127.0.0.1/myapp"),
				taggedMetadata("", "plum", "127.0.0.1/myapp"),
			},
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}},
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"apple", "plum", "pear"},
			expectedAdditions:  []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")},
			expectedRemovals: []metadata.V2Metadata{
				taggedMetadata("key", "apple", "127.0.0.1/myapp"),
				{Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"},
			},
		},
		{
			name:       "reach maximum existence checks",
			targetRepo: "user/app",
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"},
			},
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}},
			expectedExists:     false,
			expectedRequests:   []string{"banana", "plum", "apple"},
			expectedRemovals: []metadata.V2Metadata{
				{Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"},
			},
		},
		{
			name:       "zero allowed existence checks",
			targetRepo: "user/app",
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"},
				{Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"},
			},
			maxExistenceChecks: 0,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}},
		},
		{
			name:       "stat single digest just once",
			targetRepo: "busybox",
			metadata: []metadata.V2Metadata{
				taggedMetadata("key1", "pear", "docker.io/library/busybox"),
				taggedMetadata("key2", "apple", "docker.io/library/busybox"),
				taggedMetadata("key3", "apple", "docker.io/library/busybox"),
			},
			maxExistenceChecks: 3,
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}},
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"apple", "pear"},
			expectedAdditions:  []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}},
			expectedRemovals:   []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")},
		},
		{
			name:       "don't stop on first error",
			targetRepo: "user/app",
			hmacKey:    "key",
			metadata: []metadata.V2Metadata{
				taggedMetadata("key", "banana", "docker.io/user/app"),
				taggedMetadata("key", "orange", "docker.io/user/app"),
				taggedMetadata("key", "plum", "docker.io/user/app"),
			},
			maxExistenceChecks: 3,
			remoteErrors:       map[digest.Digest]error{"orange": distribution.ErrAccessDenied},
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}},
			expectedError:      nil,
			expectedRequests:   []string{"plum", "orange", "banana"},
			expectedRemovals: []metadata.V2Metadata{
				taggedMetadata("key", "plum", "docker.io/user/app"),
				taggedMetadata("key", "banana", "docker.io/user/app"),
			},
		},
		{
			name:       "remove outdated metadata",
			targetRepo: "docker.io/user/app",
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"},
				{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"},
			},
			maxExistenceChecks: 3,
			remoteErrors:       map[digest.Digest]error{"orange": distribution.ErrBlobUnknown},
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}},
			expectedExists:     false,
			expectedRequests:   []string{"orange"},
			expectedRemovals:   []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}},
		},
		{
			name:       "missing SourceRepository",
			targetRepo: "busybox",
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("1")},
				{Digest: digest.Digest("3")},
				{Digest: digest.Digest("2")},
			},
			maxExistenceChecks: 3,
			expectedExists:     false,
			expectedRequests:   []string{"2", "3", "1"},
		},
		{
			name:       "with and without SourceRepository",
			targetRepo: "busybox",
			metadata: []metadata.V2Metadata{
				{Digest: digest.Digest("1")},
				{Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"},
				{Digest: digest.Digest("3")},
			},
			remoteBlobs:        map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}},
			maxExistenceChecks: 3,
			expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer},
			expectedExists:     true,
			expectedRequests:   []string{"2", "3", "1"},
			expectedAdditions:  []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}},
			expectedRemovals: []metadata.V2Metadata{
				{Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"},
			},
		},
	} {
		repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo)
		if err != nil {
			t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err)
		}
		repo := &mockRepo{
			t:        t,
			errors:   tc.remoteErrors,
			blobs:    tc.remoteBlobs,
			requests: []string{},
		}
		ctx := context.Background()
		ms := &mockV2MetadataService{}
		pd := &v2PushDescriptor{
			hmacKey:  []byte(tc.hmacKey),
			repoInfo: repoInfo,
			layer: &storeLayer{
				Layer: layer.EmptyLayer,
			},
			repo:              repo,
			v2MetadataService: ms,
			pushState:         &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)},
			checkedDigests:    make(map[digest.Digest]struct{}),
		}

		desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata)

		if !reflect.DeepEqual(desc, tc.expectedDescriptor) {
			t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor)
		}
		if exists != tc.expectedExists {
			t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists)
		}
		if !reflect.DeepEqual(err, tc.expectedError) {
			t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError)
		}

		if len(repo.requests) != len(tc.expectedRequests) {
			t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests))
		}
		for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ {
			if repo.requests[i] != tc.expectedRequests[i] {
				t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i])
			}
		}
		for i := len(repo.requests); i < len(tc.expectedRequests); i++ {
			t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i])
		}
		for i := len(tc.expectedRequests); i < len(repo.requests); i++ {
			t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i])
		}

		if len(ms.added) != len(tc.expectedAdditions) {
			t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions))
		}
		for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ {
			if ms.added[i] != tc.expectedAdditions[i] {
				t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i])
			}
		}
		for i := len(ms.added); i < len(tc.expectedAdditions); i++ {
			t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i])
		}
		for i := len(tc.expectedAdditions); i < len(ms.added); i++ {
			t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i])
		}

		if len(ms.removed) != len(tc.expectedRemovals) {
			t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals))
		}
		for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ {
			if ms.removed[i] != tc.expectedRemovals[i] {
				t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i])
			}
		}
		for i := len(ms.removed); i < len(tc.expectedRemovals); i++ {
			t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i])
		}
		for i := len(tc.expectedRemovals); i < len(ms.removed); i++ {
			t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i])
		}
	}
}

func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata {
	meta := metadata.V2Metadata{
		Digest:           digest.Digest(dgst),
		SourceRepository: sourceRepo,
	}

	meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta)
	return meta
}

type mockRepo struct {
	t        *testing.T
	errors   map[digest.Digest]error
	blobs    map[digest.Digest]distribution.Descriptor
	requests []string
}

var _ distribution.Repository = &mockRepo{}

func (m *mockRepo) Named() reference.Named {
	m.t.Fatalf("Named() not implemented")
	return nil
}
func (m *mockRepo) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
	m.t.Fatalf("Manifests() not implemented")
	return nil, nil
}
func (m *mockRepo) Tags(ctx context.Context) distribution.TagService {
	m.t.Fatalf("Tags() not implemented")
	return nil
}
func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore {
	return &mockBlobStore{
		repo: m,
	}
}

type mockBlobStore struct {
	repo *mockRepo
}

var _ distribution.BlobStore = &mockBlobStore{}

func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	m.repo.requests = append(m.repo.requests, dgst.String())
	if err, exists := m.repo.errors[dgst]; exists {
		return distribution.Descriptor{}, err
	}
	if desc, exists := m.repo.blobs[dgst]; exists {
		return desc, nil
	}
	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	m.repo.t.Fatal("Get() not implemented")
	return nil, nil
}

func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	m.repo.t.Fatal("Open() not implemented")
	return nil, nil
}

func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	m.repo.t.Fatal("Put() not implemented")
	return distribution.Descriptor{}, nil
}

func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	m.repo.t.Fatal("Create() not implemented")
	return nil, nil
}
func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	m.repo.t.Fatal("Resume() not implemented")
	return nil, nil
}
func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
	m.repo.t.Fatal("Delete() not implemented")
	return nil
}
func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	m.repo.t.Fatalf("ServeBlob() not implemented")
	return nil
}

type mockV2MetadataService struct {
	added   []metadata.V2Metadata
	removed []metadata.V2Metadata
}

var _ metadata.V2MetadataService = &mockV2MetadataService{}

func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) {
	return nil, nil
}
func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
	return "", nil
}
func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error {
	m.added = append(m.added, metadata)
	return nil
}
func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error {
	meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta)
	m.Add(diffID, meta)
	return nil
}
func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error {
	m.removed = append(m.removed, metadata)
	return nil
}

type progressSink struct {
	t *testing.T
}

func (s *progressSink) WriteProgress(p progress.Progress) error {
	s.t.Logf("progress update: %#+v", p)
	return nil
}
156
vendor/github.com/docker/docker-ce/components/engine/distribution/registry.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
package distribution // import "github.com/docker/docker/distribution"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ImageTypes represents the schema2 config types for images
|
||||
var ImageTypes = []string{
|
||||
schema2.MediaTypeImageConfig,
|
||||
// Handle unexpected values from https://github.com/docker/distribution/issues/1621
|
||||
// (see also https://github.com/docker/docker/issues/22378,
|
||||
// https://github.com/docker/docker/issues/30083)
|
||||
"application/octet-stream",
|
||||
"application/json",
|
||||
"text/html",
|
||||
// Treat defaulted values as images, newer types cannot be implied
|
||||
"",
|
||||
}
|
||||
|
||||
// PluginTypes represents the schema2 config types for plugins
|
||||
var PluginTypes = []string{
|
||||
schema2.MediaTypePluginConfig,
|
||||
}
|
||||
|
||||
var mediaTypeClasses map[string]string
|
||||
|
||||
func init() {
|
||||
// initialize media type classes with all know types for
|
||||
// plugin
|
||||
mediaTypeClasses = map[string]string{}
|
||||
for _, t := range ImageTypes {
|
||||
mediaTypeClasses[t] = "image"
|
||||
}
|
||||
for _, t := range PluginTypes {
|
||||
mediaTypeClasses[t] = "plugin"
|
||||
}
|
||||
}

// NewV2Repository returns a repository (v2 only). It creates an HTTP transport
// providing timeout settings and authentication support, and also verifies the
// remote API version.
func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) {
	repoName := repoInfo.Name.Name()
	// If endpoint does not support CanonicalName, use the RemoteName instead
	if endpoint.TrimHostname {
		repoName = reference.Path(repoInfo.Name)
	}

	direct := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}

	// TODO(dmcgowan): Call close idle connections when complete, use keep alive
	base := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		Dial:                direct.Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     endpoint.TLSConfig,
		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
		DisableKeepAlives: true,
	}

	proxyDialer, err := sockets.DialerFromEnvironment(direct)
	if err == nil {
		base.Dial = proxyDialer.Dial
	}

	modifiers := registry.Headers(dockerversion.DockerUserAgent(ctx), metaHeaders)
	authTransport := transport.NewTransport(base, modifiers...)

	challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport)
	if err != nil {
		transportOK := false
		if responseErr, ok := err.(registry.PingResponseError); ok {
			transportOK = true
			err = responseErr.Err
		}
		return nil, foundVersion, fallbackError{
			err:         err,
			confirmedV2: foundVersion,
			transportOK: transportOK,
		}
	}

	if authConfig.RegistryToken != "" {
		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
	} else {
		scope := auth.RepositoryScope{
			Repository: repoName,
			Actions:    actions,
			Class:      repoInfo.Class,
		}

		creds := registry.NewStaticCredentialStore(authConfig)
		tokenHandlerOptions := auth.TokenHandlerOptions{
			Transport:   authTransport,
			Credentials: creds,
			Scopes:      []auth.Scope{scope},
			ClientID:    registry.AuthClientID,
		}
		tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
		basicHandler := auth.NewBasicHandler(creds)
		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
	}
	tr := transport.NewTransport(base, modifiers...)

	repoNameRef, err := reference.WithName(repoName)
	if err != nil {
		return nil, foundVersion, fallbackError{
			err:         err,
			confirmedV2: foundVersion,
			transportOK: true,
		}
	}

	repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr)
	if err != nil {
		err = fallbackError{
			err:         err,
			confirmedV2: foundVersion,
			transportOK: true,
		}
	}
	return
}
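
// Sketch (illustrative only): how a puller typically obtains a repository
// client. endpoint and repoInfo are assumed to come from a prior
// registry.Service lookup; "pull" scopes the requested auth token.
// exampleResolveRepo is a hypothetical caller.
func exampleResolveRepo(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, authConfig *types.AuthConfig) (distribution.Repository, error) {
	repo, _, err := NewV2Repository(ctx, repoInfo, endpoint, http.Header{}, authConfig, "pull")
	return repo, err
}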

type existingTokenHandler struct {
	token string
}

func (th *existingTokenHandler) Scheme() string {
	return "bearer"
}

func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
	return nil
}
128
vendor/github.com/docker/docker-ce/components/engine/distribution/registry_unit_test.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"runtime"
	"strings"
	"testing"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/registry"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

const secretRegistryToken = "mysecrettoken"

type tokenPassThruHandler struct {
	reached       bool
	gotToken      bool
	shouldSend401 func(url string) bool
}

func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.reached = true
	if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) {
		logrus.Debug("Detected registry token in auth header")
		h.gotToken = true
	}
	if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) {
		w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`)
		w.WriteHeader(401)
	}
}

func testTokenPassThru(t *testing.T, ts *httptest.Server) {
	uri, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatalf("could not parse url from test server: %v", err)
	}

	endpoint := registry.APIEndpoint{
		Mirror:       false,
		URL:          uri,
		Version:      2,
		Official:     false,
		TrimHostname: false,
		TLSConfig:    nil,
	}
	n, _ := reference.ParseNormalizedNamed("testremotename")
	repoInfo := &registry.RepositoryInfo{
		Name: n,
		Index: &registrytypes.IndexInfo{
			Name:     "testrepo",
			Mirrors:  nil,
			Secure:   false,
			Official: false,
		},
		Official: false,
	}
	imagePullConfig := &ImagePullConfig{
		Config: Config{
			MetaHeaders: http.Header{},
			AuthConfig: &types.AuthConfig{
				RegistryToken: secretRegistryToken,
			},
		},
		Schema2Types: ImageTypes,
	}
	puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
	if err != nil {
		t.Fatal(err)
	}
	p := puller.(*v2Puller)
	ctx := context.Background()
	p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		t.Fatal(err)
	}

	logrus.Debug("About to pull")
	// We expect it to fail, since we haven't mock'd the full registry exchange in our handler above
	tag, _ := reference.WithTag(n, "tag_goes_here")
	_ = p.pullV2Repository(ctx, tag, runtime.GOOS)
}

func TestTokenPassThru(t *testing.T) {
	handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }}
	ts := httptest.NewServer(handler)
	defer ts.Close()

	testTokenPassThru(t, ts)

	if !handler.reached {
		t.Fatal("Handler not reached")
	}
	if !handler.gotToken {
		t.Fatal("Failed to receive registry token")
	}
}

func TestTokenPassThruDifferentHost(t *testing.T) {
	handler := new(tokenPassThruHandler)
	ts := httptest.NewServer(handler)
	defer ts.Close()

	tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.RequestURI == "/v2/" {
			w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`)
			w.WriteHeader(401)
			return
		}
		http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently)
	}))
	defer tsredirect.Close()

	testTokenPassThru(t, tsredirect)

	if !handler.reached {
		t.Fatal("Handler not reached")
	}
	if handler.gotToken {
		t.Fatal("Redirect should not forward Authorization header to another host")
	}
}
44
vendor/github.com/docker/docker-ce/components/engine/distribution/utils/progress.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
package utils // import "github.com/docker/docker/distribution/utils"

import (
	"io"
	"net"
	"os"
	"syscall"

	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/sirupsen/logrus"
)

// WriteDistributionProgress is a helper for writing progress from chan to JSON
// stream with an optional cancel function.
func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
	progressOutput := streamformatter.NewJSONProgressOutput(outStream, false)
	operationCancelled := false

	for prog := range progressChan {
		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
			// don't log broken pipe errors as this is the normal case when a client aborts
			if isBrokenPipe(err) {
				logrus.Info("Pull session cancelled")
			} else {
				logrus.Errorf("error writing progress to client: %v", err)
			}
			cancelFunc()
			operationCancelled = true
			// Don't return, because we need to continue draining
			// progressChan until it's closed to avoid a deadlock.
		}
	}
}

func isBrokenPipe(e error) bool {
	if netErr, ok := e.(*net.OpError); ok {
		e = netErr.Err
		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
			e = sysErr.Err
		}
	}
	return e == syscall.EPIPE
}
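
// Sketch (illustrative only, assuming a caller like the daemon's pull
// endpoint): progress events are produced on a buffered channel, streamed to
// the client as JSON, and cancelPull aborts the operation if the client
// disconnects. exampleStreamProgress is a hypothetical helper name.
func exampleStreamProgress(out io.Writer, cancelPull func()) chan<- progress.Progress {
	progressChan := make(chan progress.Progress, 100)
	go WriteDistributionProgress(cancelPull, out, progressChan)
	// The producer must close progressChan once the operation finishes.
	return progressChan
}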
474
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/download.go
generated
vendored
Normal file
@ -0,0 +1,474 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"errors"
	"fmt"
	"io"
	"runtime"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

const maxDownloadAttempts = 5

// LayerDownloadManager figures out which layers need to be downloaded, then
// registers and downloads those, taking into account dependencies between
// layers.
type LayerDownloadManager struct {
	layerStores  map[string]layer.Store
	tm           TransferManager
	waitDuration time.Duration
}

// SetConcurrency sets the max concurrent downloads for each pull
func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
	ldm.tm.SetConcurrency(concurrency)
}

// NewLayerDownloadManager returns a new LayerDownloadManager.
func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
	manager := LayerDownloadManager{
		layerStores:  layerStores,
		tm:           NewTransferManager(concurrencyLimit),
		waitDuration: time.Second,
	}
	for _, option := range options {
		option(&manager)
	}
	return &manager
}
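
// Sketch (illustrative only, not part of the vendored source): how the daemon
// might wire up a download manager for the host platform. The store argument
// stands in for the daemon's real layer store; exampleNewDownloadManager is a
// hypothetical name.
func exampleNewDownloadManager(store layer.Store, maxConcurrent int) *LayerDownloadManager {
	stores := map[string]layer.Store{runtime.GOOS: store}
	return NewLayerDownloadManager(stores, maxConcurrent)
}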

type downloadTransfer struct {
	Transfer

	layerStore layer.Store
	layer      layer.Layer
	err        error
}

// result returns the layer resulting from the download, if the download
// and registration were successful.
func (d *downloadTransfer) result() (layer.Layer, error) {
	return d.layer, d.err
}

// A DownloadDescriptor references a layer that may need to be downloaded.
type DownloadDescriptor interface {
	// Key returns the key used to deduplicate downloads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer, or an error
	// if it is unknown (for example, if it has not been downloaded
	// before).
	DiffID() (layer.DiffID, error)
	// Download is called to perform the download.
	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
	// Close is called when the download manager is finished with this
	// descriptor and will not call Download again or read from the reader
	// that Download returned.
	Close()
}

// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
// additional Registered method which gets called after a downloaded layer is
// registered. This allows the user of the download manager to know the DiffID
// of each registered layer. This method is called if a cast to
// DownloadDescriptorWithRegistered is successful.
type DownloadDescriptorWithRegistered interface {
	DownloadDescriptor
	Registered(diffID layer.DiffID)
}
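
// For a minimal concrete implementation of these interfaces, see
// mockDownloadDescriptor in download_test.go in this package; it sketches the
// Key/ID/DiffID/Download lifecycle against an in-memory tar stream.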

// Download is a blocking function which ensures the requested layers are
// present in the layer store. It uses the string returned by the Key method to
// deduplicate downloads. If a given layer is not already known to be present in
// the layer store, and the key is not used by an in-progress download, the
// Download method is called to get the layer tar data. Layers are then
// registered in the appropriate order. The caller must call the returned
// release function once it is done with the returned RootFS object.
func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	var (
		topLayer       layer.Layer
		topDownload    *downloadTransfer
		watcher        *Watcher
		missingLayer   bool
		transferKey    = ""
		downloadsByKey = make(map[string]*downloadTransfer)
	)

	// Assume that the operating system is the host OS if blank, and validate it
	// to ensure we don't cause a panic by an invalid index into the layerstores.
	if os == "" {
		os = runtime.GOOS
	}
	if !system.IsOSSupported(os) {
		return image.RootFS{}, nil, system.ErrNotSupportedOperatingSystem
	}

	rootFS := initialRootFS
	for _, descriptor := range layers {
		key := descriptor.Key()
		transferKey += key

		if !missingLayer {
			missingLayer = true
			diffID, err := descriptor.DiffID()
			if err == nil {
				getRootFS := rootFS
				getRootFS.Append(diffID)
				l, err := ldm.layerStores[os].Get(getRootFS.ChainID())
				if err == nil {
					// Layer already exists.
					logrus.Debugf("Layer already exists: %s", descriptor.ID())
					progress.Update(progressOutput, descriptor.ID(), "Already exists")
					if topLayer != nil {
						layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
					}
					topLayer = l
					missingLayer = false
					rootFS.Append(diffID)
					// Register this repository as a source of this layer.
					withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
					if hasRegistered { // As layerstore may set the driver
						withRegistered.Registered(diffID)
					}
					continue
				}
			}
		}

		// Does this layer have the same data as a previous layer in
		// the stack? If so, avoid downloading it more than once.
		var topDownloadUncasted Transfer
		if existingDownload, ok := downloadsByKey[key]; ok {
			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os)
			defer topDownload.Transfer.Release(watcher)
			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
			topDownload = topDownloadUncasted.(*downloadTransfer)
			continue
		}

		// Layer is not known to exist - download and register it.
		progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer")

		var xferFunc DoFunc
		if topDownload != nil {
			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os)
			defer topDownload.Transfer.Release(watcher)
		} else {
			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os)
		}
		topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
		topDownload = topDownloadUncasted.(*downloadTransfer)
		downloadsByKey[key] = topDownload
	}

	if topDownload == nil {
		return rootFS, func() {
			if topLayer != nil {
				layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
			}
		}, nil
	}

	// Won't be using the list built up so far - will generate it
	// from downloaded layers instead.
	rootFS.DiffIDs = []layer.DiffID{}

	defer func() {
		if topLayer != nil {
			layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
		}
	}()

	select {
	case <-ctx.Done():
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, ctx.Err()
	case <-topDownload.Done():
		break
	}

	l, err := topDownload.result()
	if err != nil {
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, err
	}

	// Must do this exactly len(layers) times, so we don't include the
	// base layer on Windows.
	for range layers {
		if l == nil {
			topDownload.Transfer.Release(watcher)
			return rootFS, func() {}, errors.New("internal error: too few parent layers")
		}
		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
		l = l.Parent()
	}
	return rootFS, func() { topDownload.Transfer.Release(watcher) }, err
}
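
// Sketch (illustrative only): how a puller typically drives Download. The
// descriptors would be built from a manifest's layers; examplePull is a
// hypothetical caller, and release must be invoked once the resulting RootFS
// has been committed to an image.
func examplePull(ctx context.Context, ldm *LayerDownloadManager, descriptors []DownloadDescriptor, out progress.Output) (image.RootFS, error) {
	rootFS, release, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, out)
	if err != nil {
		return image.RootFS{}, err
	}
	// Normally deferred until the image referencing rootFS is created.
	release()
	return rootFS, nil
}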

// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStores[os],
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled?
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			defer descriptor.Close()

			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts {
					logrus.Errorf("Download failed: %v", err)
					d.err = err
					return
				}

				logrus.Errorf("Download failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(ldm.waitDuration)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}
				}
			}

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			var src distribution.Descriptor
			if fs, ok := descriptor.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			}
			if err != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}

// makeDownloadFuncFromDownload returns a function that performs the layer
// registration when the layer data is coming from an existing download. It
// waits for sourceDownload and parentDownload to complete, and then
// reregisters the data from sourceDownload's top layer on top of
// parentDownload. This function does not log progress output because it would
// interfere with the progress reporting for sourceDownload, which has the same
// Key.
func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStores[os],
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			<-start

			close(inactive)

			select {
			case <-d.Transfer.Context().Done():
				d.err = errors.New("layer registration cancelled")
				return
			case <-parentDownload.Done():
			}

			l, err := parentDownload.result()
			if err != nil {
				d.err = err
				return
			}
			parentLayer := l.ChainID()

			// sourceDownload should have already finished if
			// parentDownload finished, but wait for it explicitly
			// to be sure.
			select {
			case <-d.Transfer.Context().Done():
				d.err = errors.New("layer registration cancelled")
				return
			case <-sourceDownload.Done():
			}

			l, err = sourceDownload.result()
			if err != nil {
				d.err = err
				return
			}

			layerReader, err := l.TarStream()
			if err != nil {
				d.err = err
				return
			}
			defer layerReader.Close()

			var src distribution.Descriptor
			if fs, ok := l.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(layerReader, parentLayer)
			}
			if err != nil {
				d.err = fmt.Errorf("failed to register layer: %v", err)
				return
			}

			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}
362
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/download_test.go
generated
vendored
Normal file
@ -0,0 +1,362 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"runtime"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/opencontainers/go-digest"
	"golang.org/x/net/context"
)

const maxDownloadConcurrency = 3

type mockLayer struct {
	layerData bytes.Buffer
	diffID    layer.DiffID
	chainID   layer.ChainID
	parent    layer.Layer
	os        string
}

func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil
}

func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) {
	return nil, fmt.Errorf("not implemented")
}

func (ml *mockLayer) ChainID() layer.ChainID {
	return ml.chainID
}

func (ml *mockLayer) DiffID() layer.DiffID {
	return ml.diffID
}

func (ml *mockLayer) Parent() layer.Layer {
	return ml.parent
}

func (ml *mockLayer) Size() (size int64, err error) {
	return 0, nil
}

func (ml *mockLayer) DiffSize() (size int64, err error) {
	return 0, nil
}

func (ml *mockLayer) Metadata() (map[string]string, error) {
	return make(map[string]string), nil
}

type mockLayerStore struct {
	layers map[layer.ChainID]*mockLayer
}

func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID {
	if len(dgsts) == 0 {
		return parent
	}
	if parent == "" {
		return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...)
	}
	// H = "H(n-1) SHA256(n)"
	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...)
}
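
// Worked example (sketch): the recursion above folds diffIDs left to right
// with H(n) = SHA256(H(n-1) + " " + diffID(n)), so for two diffIDs d1, d2 the
// chain ID is digest(d1 + " " + d2), and a single diffID is its own chain ID.
// exampleChainID is a hypothetical convenience wrapper.
func exampleChainID(diffIDs ...layer.DiffID) layer.ChainID {
	return createChainIDFromParent("", diffIDs...)
}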

func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer {
	layers := map[layer.ChainID]layer.Layer{}

	for k, v := range ls.layers {
		layers[k] = v
	}

	return layers
}

func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) {
	return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{})
}

func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) {
	var (
		parent layer.Layer
		err    error
	)

	if parentID != "" {
		parent, err = ls.Get(parentID)
		if err != nil {
			return nil, err
		}
	}

	l := &mockLayer{parent: parent}
	_, err = l.layerData.ReadFrom(reader)
	if err != nil {
		return nil, err
	}
	l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes()))
	l.chainID = createChainIDFromParent(parentID, l.diffID)

	ls.layers[l.chainID] = l
	return l, nil
}

func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) {
	l, ok := ls.layers[chainID]
	if !ok {
		return nil, layer.ErrLayerDoesNotExist
	}
	return l, nil
}

func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) {
	return []layer.Metadata{}, nil
}

func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) {
	return nil, errors.New("not implemented")
}

func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) {
	return nil, errors.New("not implemented")
}

func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) {
	return nil, errors.New("not implemented")
}

func (ls *mockLayerStore) GetMountID(string) (string, error) {
	return "", errors.New("not implemented")
}

func (ls *mockLayerStore) Cleanup() error {
	return nil
}

func (ls *mockLayerStore) DriverStatus() [][2]string {
	return [][2]string{}
}

func (ls *mockLayerStore) DriverName() string {
	return "mock"
}

type mockDownloadDescriptor struct {
	currentDownloads *int32
	id               string
	diffID           layer.DiffID
	registeredDiffID layer.DiffID
	expectedDiffID   layer.DiffID
	simulateRetries  int
}

// Key returns the key used to deduplicate downloads.
func (d *mockDownloadDescriptor) Key() string {
	return d.id
}

// ID returns the ID for display purposes.
func (d *mockDownloadDescriptor) ID() string {
	return d.id
}

// DiffID should return the DiffID for this layer, or an error
// if it is unknown (for example, if it has not been downloaded
// before).
func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) {
	if d.diffID != "" {
		return d.diffID, nil
	}
	return "", errors.New("no diffID available")
}

func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) {
	d.registeredDiffID = diffID
}

func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser {
	// The mock implementation returns the ID repeated 5 times as a tar
	// stream instead of actual tar data. The data is ignored except for
	// computing IDs.
	return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id)))
}

// Download is called to perform the download.
func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	if d.currentDownloads != nil {
		defer atomic.AddInt32(d.currentDownloads, -1)

		if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency {
			return nil, 0, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming download.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return nil, 0, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10})
		}
	}

	if d.simulateRetries != 0 {
		d.simulateRetries--
		return nil, 0, errors.New("simulating retry")
	}

	return d.mockTarStream(), 0, nil
}

func (d *mockDownloadDescriptor) Close() {
}

func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor {
	return []DownloadDescriptor{
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id1",
			expectedDiffID:   layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id2",
			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id3",
			expectedDiffID:   layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id2",
			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id4",
			expectedDiffID:   layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"),
			simulateRetries:  1,
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id5",
			expectedDiffID:   layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"),
		},
	}
}

func TestSuccessfulDownload(t *testing.T) {
	// TODO Windows: Fix this unit test
	if runtime.GOOS == "windows" {
		t.Skip("Needs fixing on Windows")
	}

	layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
	lsMap := make(map[string]layer.Store)
	lsMap[runtime.GOOS] = layerStore
	ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]progress.Progress)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p
		}
		close(progressDone)
	}()

	var currentDownloads int32
	descriptors := downloadDescriptors(&currentDownloads)

	firstDescriptor := descriptors[0].(*mockDownloadDescriptor)

	// Pre-register the first layer to simulate an already-existing layer
	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "")
	if err != nil {
		t.Fatal(err)
	}
	firstDescriptor.diffID = l.DiffID()

	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("download error: %v", err)
	}

	releaseFunc()

	close(progressChan)
	<-progressDone

	if len(rootFS.DiffIDs) != len(descriptors) {
		t.Fatal("got wrong number of diffIDs in rootfs")
	}

	for i, d := range descriptors {
		descriptor := d.(*mockDownloadDescriptor)

		if descriptor.diffID != "" {
			if receivedProgress[d.ID()].Action != "Already exists" {
				t.Fatalf("did not get 'Already exists' message for %v", d.ID())
			}
		} else if receivedProgress[d.ID()].Action != "Pull complete" {
			t.Fatalf("did not get 'Pull complete' message for %v", d.ID())
		}

		if rootFS.DiffIDs[i] != descriptor.expectedDiffID {
			t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i])
		}

		if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] {
			t.Fatal("diffID mismatch between rootFS and Registered callback")
		}
	}
}

func TestCancelledDownload(t *testing.T) {
	layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
	lsMap := make(map[string]layer.Store)
	lsMap[runtime.GOOS] = layerStore
	ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := downloadDescriptors(nil)
	_, _, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected download to be cancelled")
	}

	close(progressChan)
	<-progressDone
}
401
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/transfer.go
generated
vendored
Normal file
@ -0,0 +1,401 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"runtime"
	"sync"

	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

// DoNotRetry is an error wrapper indicating that the error cannot be resolved
// with a retry.
type DoNotRetry struct {
	Err error
}

// Error returns the stringified representation of the encapsulated error.
func (e DoNotRetry) Error() string {
	return e.Err.Error()
}

// Watcher is returned by Watch and can be passed to Release to stop watching.
type Watcher struct {
	// signalChan is used to signal to the watcher goroutine that
	// new progress information is available, or that the transfer
	// has finished.
	signalChan chan struct{}
	// releaseChan signals to the watcher goroutine that the watcher
	// should be detached.
	releaseChan chan struct{}
	// running remains open as long as the watcher is watching the
	// transfer. It gets closed if the transfer finishes or the
	// watcher is detached.
	running chan struct{}
}

// Transfer represents an in-progress transfer.
type Transfer interface {
	Watch(progressOutput progress.Output) *Watcher
	Release(*Watcher)
	Context() context.Context
	Close()
	Done() <-chan struct{}
	Released() <-chan struct{}
	Broadcast(masterProgressChan <-chan progress.Progress)
}

type transfer struct {
	mu sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// watchers keeps track of the goroutines monitoring progress output,
	// indexed by the channels that release them.
	watchers map[chan struct{}]*Watcher

	// lastProgress is the most recently received progress event.
	lastProgress progress.Progress
	// hasLastProgress is true when lastProgress has been set.
	hasLastProgress bool

	// running remains open as long as the transfer is in progress.
	running chan struct{}
	// released stays open until all watchers release the transfer and
	// the transfer is no longer tracked by the transfer manager.
	released chan struct{}

	// broadcastDone is true if the master progress channel has closed.
	broadcastDone bool
	// closed is true if Close has been called
	closed bool
	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		released:          make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}

// Broadcast copies the progress and error output to all viewers.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-masterProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-masterProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}

// Watch adds a watcher to the transfer. The supplied channel gets progress
// updates and is closed when the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch, indicating that the watcher no longer wants
// to be notified about the progress of the transfer. All calls to Watch must
// be paired with later calls to Release so that the lifecycle of the transfer
// is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// released may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.released:
			default:
				close(t.released)
			}
		} else {
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
	return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
	t.mu.Lock()
	t.closed = true
	if len(t.watchers) == 0 {
		close(t.released)
	}
	t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking. It should wait until the start channel
// is closed before transferring any data. If the function closes inactive, that
// signals to the transfer manager that the job is no longer actively moving
// data - for example, it may be waiting for a dependent transfer to finish.
// This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
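
// Minimal DoFunc sketch (illustrative only, mirroring the package's unit
// tests): it honors the start slot, emits one progress event, and closes
// progressChan so the transfer manager observes completion via Broadcast.
// exampleDoFunc is a hypothetical name.
func exampleDoFunc(id string) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		xfer := NewTransfer()
		go func() {
			defer close(progressChan)
			<-start // wait for a concurrency slot before moving data
			progressChan <- progress.Progress{ID: id, Action: "testing", Current: 1, Total: 1}
		}()
		return xfer
	}
}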

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it returns progress and error output from that transfer.
	// Otherwise, it will call xferFunc to initiate the transfer.
	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
	// SetConcurrency sets the concurrencyLimit so that it is adjustable on
	// daemon reload.
	SetConcurrency(concurrency int)
}

type transferManager struct {
	mu sync.Mutex

	concurrencyLimit int
	activeTransfers  int
	transfers        map[string]Transfer
	waitingTransfers []chan struct{}
}

// NewTransferManager returns a new TransferManager.
func NewTransferManager(concurrencyLimit int) TransferManager {
	return &transferManager{
		concurrencyLimit: concurrencyLimit,
		transfers:        make(map[string]Transfer),
	}
}
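
// Usage sketch (illustrative only): two Transfer calls with the same key are
// deduplicated into one underlying transfer, while each caller gets its own
// Watcher that it must Release. The key shown is an assumed example; see
// exampleDoFunc above.
func exampleDedupe(tm TransferManager, out progress.Output) {
	xfer1, w1 := tm.Transfer("layer:sha256:example", exampleDoFunc("layer"), out)
	xfer2, w2 := tm.Transfer("layer:sha256:example", exampleDoFunc("layer"), out) // joins the first transfer
	<-xfer2.Done()
	xfer1.Release(w1)
	xfer2.Release(w2)
}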

// SetConcurrency sets the concurrencyLimit
func (tm *transferManager) SetConcurrency(concurrency int) {
	tm.mu.Lock()
	tm.concurrencyLimit = concurrency
	tm.mu.Unlock()
}

// Transfer checks if a transfer matching the given key is in progress. If not,
// it starts one by calling xferFunc. The caller supplies a channel which
// receives progress output from the transfer.
func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
	tm.mu.Lock()
	defer tm.mu.Unlock()

	for {
		xfer, present := tm.transfers[key]
		if !present {
			break
		}
		// Transfer is already in progress.
		watcher := xfer.Watch(progressOutput)

		select {
		case <-xfer.Context().Done():
			// We don't want to watch a transfer that has been cancelled.
			// Wait for it to be removed from the map and try again.
			xfer.Release(watcher)
			tm.mu.Unlock()
			// The goroutine that removes this transfer from the
			// map is also waiting for xfer.Done(), so yield to it.
			// This could be avoided by adding a Closed method
			// to Transfer to allow explicitly waiting for it to be
			// removed from the map, but forcing a scheduling round
			// in this very rare case seems better than bloating the
			// interface definition.
			runtime.Gosched()
			<-xfer.Done()
			tm.mu.Lock()
		default:
			return xfer, watcher
		}
	}

	start := make(chan struct{})
	inactive := make(chan struct{})

	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
		close(start)
		tm.activeTransfers++
	} else {
		tm.waitingTransfers = append(tm.waitingTransfers, start)
	}

	masterProgressChan := make(chan progress.Progress)
	xfer := xferFunc(masterProgressChan, start, inactive)
	watcher := xfer.Watch(progressOutput)
	go xfer.Broadcast(masterProgressChan)
	tm.transfers[key] = xfer

	// When the transfer is finished, remove from the map.
	go func() {
		for {
			select {
			case <-inactive:
				tm.mu.Lock()
				tm.inactivate(start)
				tm.mu.Unlock()
				inactive = nil
			case <-xfer.Done():
				tm.mu.Lock()
				if inactive != nil {
					tm.inactivate(start)
				}
				delete(tm.transfers, key)
				tm.mu.Unlock()
				xfer.Close()
				return
			}
		}
	}()

	return xfer, watcher
}

func (tm *transferManager) inactivate(start chan struct{}) {
	// If the transfer was started, remove it from the activeTransfers
	// count.
	select {
	case <-start:
		// Start next transfer if any are waiting
		if len(tm.waitingTransfers) != 0 {
			close(tm.waitingTransfers[0])
			tm.waitingTransfers = tm.waitingTransfers[1:]
		} else {
			tm.activeTransfers--
		}
	default:
	}
}
410
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/transfer_test.go
generated
vendored
Normal file
@ -0,0 +1,410 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/docker/pkg/progress"
)

func TestTransfer(t *testing.T) {
	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			select {
			case <-start:
			default:
				t.Fatalf("transfer function not started even though concurrency limit not reached")
			}

			xfer := NewTransfer()
			go func() {
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			val, present := receivedProgress[p.ID]
			if present && p.Current <= val {
				t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
			}
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start a few transfers
	ids := []string{"id1", "id2", "id3"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

func TestConcurrencyLimit(t *testing.T) {
	concurrencyLimit := 3
	var runningJobs int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				<-start
				totalJobs := atomic.AddInt32(&runningJobs, 1)
				if int(totalJobs) > concurrencyLimit {
					t.Fatalf("too many jobs running")
				}
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				atomic.AddInt32(&runningJobs, -1)
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(concurrencyLimit)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start more transfers than the concurrency limit
	ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

func TestInactiveJobs(t *testing.T) {
	concurrencyLimit := 3
	var runningJobs int32
	testDone := make(chan struct{})

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				<-start
				totalJobs := atomic.AddInt32(&runningJobs, 1)
				if int(totalJobs) > concurrencyLimit {
					t.Fatalf("too many jobs running")
				}
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				atomic.AddInt32(&runningJobs, -1)
				close(inactive)
				<-testDone
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(concurrencyLimit)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start more transfers than the concurrency limit
	ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	close(testDone)
	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}

func TestWatchRelease(t *testing.T) {
	ready := make(chan struct{})

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type watcherInfo struct {
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(w watcherInfo) {
		first := true
		for range w.progressChan {
			if first {
				close(w.receivedFirstProgress)
			}
			first = false
		}
		close(w.progressDone)
	}

	// Start a transfer
	watchers := make([]watcherInfo, 5)
	var xfer Transfer
	watchers[0].progressChan = make(chan progress.Progress)
	watchers[0].progressDone = make(chan struct{})
	watchers[0].receivedFirstProgress = make(chan struct{})
	xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan))
	go progressConsumer(watchers[0])

	// Give it multiple watchers
	for i := 1; i != len(watchers); i++ {
		watchers[i].progressChan = make(chan progress.Progress)
		watchers[i].progressDone = make(chan struct{})
		watchers[i].receivedFirstProgress = make(chan struct{})
		watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan))
		go progressConsumer(watchers[i])
	}

	// Now that the watchers are set up, allow the transfer goroutine to
	// proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, w := range watchers {
		<-w.receivedFirstProgress
	}

	// Release one watcher every 5ms
	for _, w := range watchers {
		xfer.Release(w.watcher)
		<-time.After(5 * time.Millisecond)
	}

	// Now that all watchers have been released, Released() should
	// return a closed channel.
	<-xfer.Released()

	// Done() should return a closed channel because the xfer func returned
	// due to cancellation.
	<-xfer.Done()

	for _, w := range watchers {
		close(w.progressChan)
		<-w.progressDone
	}
}
|
||||
|
||||
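// TestWatchFinishedTransfer verifies that watchers can still be added to,
// and released from, a transfer that has already completed.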
func TestWatchFinishedTransfer(t *testing.T) {
	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				// Finish immediately
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	// Start a transfer
	watchers := make([]*Watcher, 3)
	var xfer Transfer
	xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress)))

	// Give it a watcher immediately
	watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

	// Wait for the transfer to complete
	<-xfer.Done()

	// Set up another watcher
	watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

	// Release the watchers
	for _, w := range watchers {
		xfer.Release(w)
	}

	// Now that all watchers have been released, Released() should
	// return a closed channel.
	<-xfer.Released()
}

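// TestDuplicateTransfer starts several transfers sharing one ID and checks
// that the transfer function runs only once while every watcher still
// receives progress output.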
func TestDuplicateTransfer(t *testing.T) {
	ready := make(chan struct{})

	var xferFuncCalls int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			atomic.AddInt32(&xferFuncCalls, 1)
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type transferInfo struct {
		xfer                  Transfer
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(t transferInfo) {
		first := true
		for range t.progressChan {
			if first {
				close(t.receivedFirstProgress)
			}
			first = false
		}
		close(t.progressDone)
	}

	// Try to start multiple transfers with the same ID
	transfers := make([]transferInfo, 5)
	for i := range transfers {
		t := &transfers[i]
		t.progressChan = make(chan progress.Progress)
		t.progressDone = make(chan struct{})
		t.receivedFirstProgress = make(chan struct{})
		t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan))
		go progressConsumer(*t)
	}

	// Allow the transfer goroutine to proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, t := range transfers {
		<-t.receivedFirstProgress
	}

	// Confirm that the transfer function was called exactly once.
	if xferFuncCalls != 1 {
		t.Fatal("transfer function wasn't called exactly once")
	}

	// Release one watcher every 5ms
	for _, t := range transfers {
		t.xfer.Release(t.watcher)
		<-time.After(5 * time.Millisecond)
	}

	for _, t := range transfers {
		// Now that all watchers have been released, Released() should
		// return a closed channel.
		<-t.xfer.Released()
		// Done() should return a closed channel because the xfer func returned
		// due to cancellation.
		<-t.xfer.Done()
	}

	for _, t := range transfers {
		close(t.progressChan)
		<-t.progressDone
	}
}
174
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/upload.go
generated
vendored
Normal file

@@ -0,0 +1,174 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"errors"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
)

const maxUploadAttempts = 5

// LayerUploadManager provides task management and progress reporting for
// uploads.
type LayerUploadManager struct {
	tm           TransferManager
	waitDuration time.Duration
}

// SetConcurrency sets the max concurrent uploads for each push
func (lum *LayerUploadManager) SetConcurrency(concurrency int) {
	lum.tm.SetConcurrency(concurrency)
}

// NewLayerUploadManager returns a new LayerUploadManager.
func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager {
	manager := LayerUploadManager{
		tm:           NewTransferManager(concurrencyLimit),
		waitDuration: time.Second,
	}
	for _, option := range options {
		option(&manager)
	}
	return &manager
}

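// A minimal construction sketch (hypothetical values, not part of this
// file). The variadic functional options let callers override defaults such
// as waitDuration, as the tests in upload_test.go do:
//
//	lum := NewLayerUploadManager(3, func(m *LayerUploadManager) {
//		m.waitDuration = 100 * time.Millisecond
//	})
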
type uploadTransfer struct {
	Transfer

	remoteDescriptor distribution.Descriptor
	err              error
}

// An UploadDescriptor references a layer that may need to be uploaded.
type UploadDescriptor interface {
	// Key returns the key used to deduplicate uploads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer.
	DiffID() layer.DiffID
	// Upload is called to perform the upload.
	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
	// SetRemoteDescriptor provides the distribution.Descriptor that was
	// returned by Upload. This descriptor is not to be confused with
	// the UploadDescriptor interface, which is used for internally
	// identifying layers that are being uploaded.
	SetRemoteDescriptor(descriptor distribution.Descriptor)
}

// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		dedupDescriptors = make(map[string]*uploadTransfer)
	)

	for _, descriptor := range layers {
		progress.Update(progressOutput, descriptor.ID(), "Preparing")

		key := descriptor.Key()
		if _, present := dedupDescriptors[key]; present {
			continue
		}

		xferFunc := lum.makeUploadFunc(descriptor)
		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
		defer upload.Release(watcher)
		uploads = append(uploads, upload.(*uploadTransfer))
		dedupDescriptors[key] = upload.(*uploadTransfer)
	}

	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return upload.err
			}
		}
	}
	for _, l := range layers {
		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
	}

	return nil
}

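// A sketch of a hypothetical caller (names are illustrative, not part of
// this file): drain progress on a channel and block until every layer is
// pushed or the context is cancelled.
//
//	progressChan := make(chan progress.Progress)
//	go func() {
//		for p := range progressChan {
//			fmt.Println(p.ID, p.Action, p.Current, p.Total)
//		}
//	}()
//	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
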
func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		u := &uploadTransfer{
			Transfer: NewTransfer(),
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

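			// The retry loop below backs off linearly: after the Nth failure
			// it counts down N*5 ticks of lum.waitDuration (one second by
			// default) before the next attempt, up to maxUploadAttempts
			// attempts in total.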
			retries := 0
			for {
				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				if err == nil {
					u.remoteDescriptor = remoteDescriptor
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-u.Transfer.Context().Done():
					u.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
					logrus.Errorf("Upload failed: %v", err)
					u.err = err
					return
				}

				logrus.Errorf("Upload failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(lum.waitDuration)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-u.Transfer.Context().Done():
						ticker.Stop()
						u.err = errors.New("upload cancelled during retry delay")
						return
					}
				}
			}
		}()

		return u
	}
}
134
vendor/github.com/docker/docker-ce/components/engine/distribution/xfer/upload_test.go
generated
vendored
Normal file

@@ -0,0 +1,134 @@
package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"errors"
	"sync/atomic"
	"testing"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

const maxUploadConcurrency = 3

type mockUploadDescriptor struct {
	currentUploads  *int32
	diffID          layer.DiffID
	simulateRetries int
}

// Key returns the key used to deduplicate uploads.
func (u *mockUploadDescriptor) Key() string {
	return u.diffID.String()
}

// ID returns the ID for display purposes.
func (u *mockUploadDescriptor) ID() string {
	return u.diffID.String()
}

// DiffID should return the DiffID for this layer.
func (u *mockUploadDescriptor) DiffID() layer.DiffID {
	return u.diffID
}

// SetRemoteDescriptor is not used in the mock.
func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) {
}

// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}

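// uploadDescriptors returns a fixed set of mock layers. The repeated DiffID
// (sha256:cbbf2f...) exercises Key()-based deduplication, and the descriptor
// with simulateRetries set to 1 forces a single retry.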
func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
	return []UploadDescriptor{
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0},
	}
}

func TestSuccessfulUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone
}

func TestCancelledUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := uploadDescriptors(nil)
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}

	close(progressChan)
	<-progressDone
}