From 13467555652c6a8d63b5095cb339506cdc641267 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 24 Oct 2017 23:07:20 -0400 Subject: [PATCH] Bump containers/image and containers/storage Update to proposed changes in containers/image, and bump containers/storage to 04ad0b827097209ca65e59b5fd768511f3b1ae91, which is currently the tip of the master branch. Signed-off-by: Nalin Dahyabhai --- vendor.conf | 4 +- .../github.com/containers/image/copy/copy.go | 11 +- .../image/directory/directory_src.go | 5 + .../containers/image/docker/archive/src.go | 5 + .../image/docker/daemon/daemon_src.go | 5 + .../containers/image/docker/docker_client.go | 64 -- .../image/docker/docker_image_src.go | 5 + .../containers/image/docker/tarfile/dest.go | 6 +- .../containers/image/docker/tarfile/src.go | 8 +- .../containers/image/docker/tarfile/types.go | 26 +- .../containers/image/image/docker_list.go | 2 +- .../containers/image/image/docker_schema1.go | 224 +--- .../containers/image/image/docker_schema2.go | 131 +-- .../containers/image/image/manifest.go | 21 +- .../containers/image/image/memory.go | 7 + .../github.com/containers/image/image/oci.go | 97 +- .../containers/image/image/sourced.go | 4 + .../containers/image/image/unparsed.go | 7 + .../image/manifest/docker_schema1.go | 212 ++++ .../image/manifest/docker_schema2.go | 241 ++++ .../containers/image/manifest/manifest.go | 73 ++ .../containers/image/manifest/oci.go | 108 ++ .../containers/image/oci/archive/oci_src.go | 4 + .../containers/image/oci/layout/oci_src.go | 5 + .../containers/image/openshift/openshift.go | 4 + .../containers/image/storage/storage_image.go | 1024 +++++++++++------ .../image/storage/storage_reference.go | 31 +- .../image/storage/storage_transport.go | 179 ++- .../containers/image/types/types.go | 20 +- .../github.com/containers/image/vendor.conf | 5 +- .../storage/drivers/overlay/overlay.go | 11 + vendor/github.com/containers/storage/store.go | 8 +- 32 files changed, 1705 insertions(+), 852 
deletions(-) create mode 100644 vendor/github.com/containers/image/manifest/docker_schema1.go create mode 100644 vendor/github.com/containers/image/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/manifest/oci.go diff --git a/vendor.conf b/vendor.conf index 02e24ecb..7d31d9be 100644 --- a/vendor.conf +++ b/vendor.conf @@ -4,10 +4,10 @@ k8s.io/apimachinery release-1.7 https://github.com/kubernetes/apimachinery k8s.io/apiserver release-1.7 https://github.com/kubernetes/apiserver # github.com/sirupsen/logrus v1.0.0 -github.com/containers/image 57b257d128d6075ea3287991ee408d24c7bd2758 +github.com/containers/image storage-update https://github.com/nalind/image github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/ostreedev/ostree-go master -github.com/containers/storage d7921c6facc516358070a1306689eda18adaa20a +github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5 github.com/containernetworking/cni v0.4.0 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 590b3787..0380bf72 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -320,6 +320,15 @@ func (ic *imageCopier) copyLayers() error { srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} + updatedSrcInfos := ic.src.UpdatedLayerInfos() + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } for _, srcLayer := range srcInfos { var ( destInfo types.BlobInfo @@ -348,7 
+357,7 @@ func (ic *imageCopier) copyLayers() error { if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } - if layerDigestsDiffer(srcInfos, destInfos) { + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } return nil diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index fddc1c52..705e289b 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -74,3 +74,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { } return signatures, nil } + +// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *dirImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go index aebcaa82..b9941dfc 100644 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ b/vendor/github.com/containers/image/docker/archive/src.go @@ -34,3 +34,8 @@ func (s *archiveImageSource) Reference() types.ImageReference { func (s *archiveImageSource) Close() error { return nil } + +// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *archiveImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go index 644dbeec..3d059da9 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go @@ -83,3 +83,8 @@ func (s *daemonImageSource) Reference() types.ImageReference { func (s *daemonImageSource) Close() error { return os.Remove(s.tarCopyPath) } + +// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *daemonImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 24b82d6f..217e9dcb 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -8,7 +8,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "path/filepath" "strings" "time" @@ -125,69 +124,6 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string { return filepath.Join(hostCertDir, hostPort) } -func setupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err 
:= ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index 232c3cf9..14e3c2b5 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -52,6 +52,11 @@ func (s *dockerImageSource) Close() error { return nil } +// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the 
manifest, if specified. +func (s *dockerImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} + // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 72c85c70..aab4a6d9 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -168,7 +168,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { func (d *Destination) PutManifest(m []byte) error { // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. 
- var man schema2Manifest + var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { return errors.Wrap(err, "Error parsing manifest") } @@ -177,12 +177,12 @@ func (d *Destination) PutManifest(m []byte) error { } layerPaths := []string{} - for _, l := range man.Layers { + for _, l := range man.LayersDescriptors { layerPaths = append(layerPaths, l.Digest.String()) } items := []ManifestItem{{ - Config: man.Config.Digest.String(), + Config: man.ConfigDescriptor.Digest.String(), RepoTags: []string{d.repoTag}, Layers: layerPaths, Parent: "", diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index f77cb713..34d5ff32 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -254,22 +254,22 @@ func (s *Source) GetManifest() ([]byte, string, error) { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, "", err } - m := schema2Manifest{ + m := manifest.Schema2{ SchemaVersion: 2, MediaType: manifest.DockerV2Schema2MediaType, - Config: distributionDescriptor{ + ConfigDescriptor: manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(s.configBytes)), Digest: s.configDigest, }, - Layers: []distributionDescriptor{}, + LayersDescriptors: []manifest.Schema2Descriptor{}, } for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } - m.Layers = append(m.Layers, distributionDescriptor{ + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball MediaType: manifest.DockerV2Schema2LayerMediaType, Size: li.size, diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go 
b/vendor/github.com/containers/image/docker/tarfile/types.go index f16cc8c6..4780d66c 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/docker/tarfile/types.go @@ -1,6 +1,9 @@ package tarfile -import "github.com/opencontainers/go-digest" +import ( + "github.com/containers/image/manifest" + "github.com/opencontainers/go-digest" +) // Various data structures. @@ -18,30 +21,13 @@ type ManifestItem struct { Config string RepoTags []string Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[diffID]distributionDescriptor `json:",omitempty"` + Parent imageID `json:",omitempty"` + LayerSources map[diffID]manifest.Schema2Descriptor `json:",omitempty"` } type imageID string type diffID digest.Digest -// Based on github.com/docker/distribution/blobs.go -type distributionDescriptor struct { - MediaType string `json:"mediaType,omitempty"` - Size int64 `json:"size,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - URLs []string `json:"urls,omitempty"` -} - -// Based on github.com/docker/distribution/manifest/schema2/manifest.go -// FIXME: We are repeating this all over the place; make a public copy? -type schema2Manifest struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType,omitempty"` - Config distributionDescriptor `json:"config"` - Layers []distributionDescriptor `json:"layers"` -} - // Based on github.com/docker/docker/image/image.go // MOST CONTENT OMITTED AS UNNECESSARY type image struct { diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go index c79adacc..4b152d26 100644 --- a/vendor/github.com/containers/image/image/docker_list.go +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -21,7 +21,7 @@ type platformSpec struct { // A manifestDescriptor references a platform-specific manifest. 
type manifestDescriptor struct { - descriptor + manifest.Schema2Descriptor Platform platformSpec `json:"platform"` } diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go index 4152b3cd..86e30b3e 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -2,9 +2,7 @@ package image import ( "encoding/json" - "regexp" "strings" - "time" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" @@ -14,87 +12,25 @@ import ( "github.com/pkg/errors" ) -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type historySchema1 struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field. -type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - type manifestSchema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` - History []historySchema1 `json:"history"` - SchemaVersion int `json:"schemaVersion"` + m *manifest.Schema1 } -func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) { - mschema1 := &manifestSchema1{} - if err := json.Unmarshal(manifest, mschema1); err != nil { - return nil, err - } - if mschema1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion) - } - if len(mschema1.FSLayers) != 
len(mschema1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(mschema1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - - if err := fixManifestLayers(mschema1); err != nil { - return nil, err - } - return mschema1, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &manifestSchema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) if err != nil { return nil, err } - return manifest.AddDummyV2S1Signature(unsigned) + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { + return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() } func (m *manifestSchema1) manifestMIMEType() string { @@ -104,7 +40,7 @@ func (m *manifestSchema1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. @@ -128,11 +64,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
@@ -153,22 +85,25 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) } else { tag = "" } - return m.Name != name || m.Tag != tag + return m.m.Name != name || m.m.Tag != tag } func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { v1 := &v1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil { + if err := json.Unmarshal([]byte(m.m.History[0].V1Compatibility), v1); err != nil { return nil, err } - return &types.ImageInspectInfo{ - Tag: m.Tag, + i := &types.ImageInspectInfo{ + Tag: m.m.Tag, DockerVersion: v1.DockerVersion, Created: v1.Created, - Labels: v1.Config.Labels, Architecture: v1.Architecture, Os: v1.OS, - }, nil + } + if v1.Config != nil { + i.Labels = v1.Config.Labels + } + return i, nil } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -181,25 +116,18 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} if options.LayerInfos != nil { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. - if len(copy.FSLayers) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) - } - for i, info := range options.LayerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. 
- // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } if options.EmbeddedDockerReference != nil { - copy.Name = reference.Path(options.EmbeddedDockerReference) + copy.m.Name = reference.Path(options.EmbeddedDockerReference) if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.Tag = tagged.Tag() + copy.m.Tag = tagged.Tag() } else { - copy.Tag = "" + copy.m.Tag = "" } } @@ -217,78 +145,20 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(©), nil } -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), -// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, -// both from manifest.History and manifest.FSLayers). -// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). 
-func fixManifestLayers(manifest *manifestSchema1) error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) - imgs := make([]*imageV1, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) - manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - // Based on github.com/docker/docker/distribution/pull_v2.go func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { - if len(m.History) == 0 { + if len(m.m.History) == 0 { // What would this even mean?! 
Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } - if len(m.History) != len(m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) + if len(m.m.History) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } rootFS := rootFS{ @@ -296,13 +166,13 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl DiffIDs: []digest.Digest{}, BaseLayer: "", } - var layers []descriptor - history := make([]imageHistory, len(m.History)) - for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.History) - 1) - v1Index + var layers []manifest.Schema2Descriptor + history := make([]imageHistory, len(m.m.History)) + for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.History) - 1) - 
v1Index - var v1compat v1Compatibility - if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { + var v1compat manifest.Schema1V1Compatibility + if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) } history[v2Index] = imageHistory{ @@ -322,19 +192,19 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl if layerDiffIDs != nil { d = layerDiffIDs[v2Index] } - layers = append(layers, descriptor{ + layers = append(layers, manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: size, - Digest: m.FSLayers[v1Index].BlobSum, + Digest: m.m.FSLayers[v1Index].BlobSum, }) rootFS.DiffIDs = append(rootFS.DiffIDs, d) } } - configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history) + configJSON, err := configJSONFromV1Config([]byte(m.m.History[0].V1Compatibility), rootFS, history) if err != nil { return nil, err } - configDescriptor := descriptor{ + configDescriptor := manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.container.image.v1+json", Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 8cc3c495..7ccd061c 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{ // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") -type descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - type 
manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 } -func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - v2s2 := manifestSchema2{src: src} - if err := json.Unmarshal(manifest, &v2s2); err != nil { +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { return nil, err } - return &v2s2, nil + return &manifestSchema2{ + src: src, + m: m, + }, nil } // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { return &manifestSchema2{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), } } func (m *manifestSchema2) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestSchema2) manifestMIMEType() string { - return m.MediaType + return m.m.MediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. 
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} + return m.m.ConfigInfo() } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.ConfigDescriptor.Digest, + Size: m.m.ConfigDescriptor.Size, + URLs: m.m.ConfigDescriptor.URLs, }) if err != nil { return nil, err @@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) } m.configBlob = blob } @@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. 
func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - }) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -157,13 +139,16 @@ func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { if err := json.Unmarshal(config, v1); err != nil { return nil, err } - return &types.ImageInspectInfo{ + i := &types.ImageInspectInfo{ DockerVersion: v1.DockerVersion, Created: v1.Created, - Labels: v1.Config.Labels, Architecture: v1.Architecture, Os: v1.OS, - }, nil + } + if v1.Config != nil { + i.Labels = v1.Config.Labels + } + return i, nil } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -176,17 +161,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. 
+ src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. @@ -204,6 +186,15 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(&copy), nil } +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { configOCI, err := m.OCIConfig() if err != nil { @@ -214,18 +205,16 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { return nil, err } - config := descriptorOCI1{ - descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - }, + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), } - layers := make([]descriptorOCI1, len(m.LayersDescriptors)) + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { - layers[idx] =
descriptorOCI1{descriptor: m.LayersDescriptors[idx]} - if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable } else { // we assume layers are gzip'ed because docker v2s2 only deals with @@ -250,8 +239,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) - history := make([]historySchema1, len(imageConfig.History)) + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) nonemptyLayerIndex := 0 var parentV1ID string // Set in the loop v1ID := "" @@ -279,10 +268,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } blobDigest = gzippedEmptyLayerDigest } else { - if nonemptyLayerIndex >= len(m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } - blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } @@ -293,7 +282,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } v1ID = v - fakeImage := v1Compatibility{ + fakeImage := manifest.Schema1V1Compatibility{ ID: v1ID, Parent: parentV1ID, Comment: historyEntry.Comment, @@ -307,8 +296,8 @@ func (m *manifestSchema2) 
convertToManifestSchema1(dest types.ImageDestination) return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) } - fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} - history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} // Note that parentV1ID of the top layer is preserved when exiting this loop } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go index 75c9e711..4a79ac27 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/image/manifest.go @@ -1,6 +1,7 @@ package image import ( + "fmt" "time" "github.com/containers/image/docker/reference" @@ -88,11 +89,8 @@ type genericManifest interface { } func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. 
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: return manifestSchema1FromManifest(manblob) case imgspecv1.MediaTypeImageManifest: return manifestOCI1FromManifest(src, manblob) @@ -100,17 +98,8 @@ func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: return manifestSchema2FromManifestList(src, manblob) - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return manifestSchema1FromManifest(manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go index 62995f61..1e8bb3a4 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/image/memory.go @@ -71,3 +71,10 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { func (i *memoryImage) IsMultiImage() bool { return false } + +// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) UpdatedLayerInfos() []types.BlobInfo { + return i.LayerInfos() +} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 5f7c0728..77ddedae 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,41 +12,34 @@ import ( "github.com/pkg/errors" ) -type descriptorOCI1 struct { - descriptor - Annotations map[string]string `json:"annotations,omitempty"` -} - type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptorOCI1 `json:"config"` - LayersDescriptors []descriptorOCI1 `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 } -func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - oci := manifestOCI1{src: src} - if err := json.Unmarshal(manifest, &oci); err != nil { +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { return nil, err } - return &oci, nil + return &manifestOCI1{ + src: src, + m: m, + }, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { return &manifestOCI1{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), } } func (m *manifestOCI1) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestOCI1) manifestMIMEType() string { @@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. 
@@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.Config.Digest, + Size: m.m.Config.Size, + URLs: m.m.Config.URLs, }) if err != nil { return nil, err @@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.Config.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } @@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs}) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
@@ -130,13 +119,16 @@ func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { if err := json.Unmarshal(config, v1); err != nil { return nil, err } - return &types.ImageInspectInfo{ + i := &types.ImageInspectInfo{ DockerVersion: v1.DockerVersion, Created: v1.Created, - Labels: v1.Config.Labels, Architecture: v1.Architecture, Os: v1.OS, - }, nil + } + if v1.Config != nil { + i.Labels = v1.Config.Labels + } + return i, nil } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -149,18 +141,14 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.OCI1Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].Annotations = info.Annotations - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. 
@@ -176,17 +164,26 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. return memoryImageFromManifest(&copy), nil } +func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { + return manifest.Schema2Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. - config := m.ConfigDescriptor.descriptor + config := schema2DescriptorFromOCI1Descriptor(m.m.Config) // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]descriptor, len(m.LayersDescriptors)) + layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx].descriptor + layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go index ef35b3c3..1293f7d3 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/image/sourced.go @@ -88,3 +88,7 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { func (i *sourcedImage) IsMultiImage() bool { return i.manifestMIMEType == manifest.DockerV2ListMediaType } + +func (i *sourcedImage) UpdatedLayerInfos() []types.BlobInfo { + return i.UnparsedImage.UpdatedLayerInfos() +} diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go index 483cfd04..7bcac06e 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/image/unparsed.go @@ -83,3 +83,10 @@ func (i
*UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { } return i.cachedSignatures, nil } + +// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *UnparsedImage) UpdatedLayerInfos() []types.BlobInfo { + return i.src.UpdatedLayerInfos() +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go new file mode 100644 index 00000000..f4ce7320 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -0,0 +1,212 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. 
+type Schema1V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. +// (NOTE: The instance is not necessary a literal representation of the original blob, +// layers with duplicate IDs are eliminated.) +func Schema1FromManifest(manifest []byte) (*Schema1, error) { + s1 := Schema1{} + if err := json.Unmarshal(manifest, &s1); err != nil { + return nil, err + } + if s1.SchemaVersion != 1 { + return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) + } + if len(s1.FSLayers) != len(s1.History) { + return nil, errors.New("length of history not equal to number of layers") + } + if len(s1.FSLayers) == 0 { + return nil, errors.New("no FSLayers in manifest") + } + if err := s1.fixManifestLayers(); err != nil { + return nil, err + } + return &s1, nil +} + +// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. +func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { + var name, tag string + if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. + name = reference.Path(ref) + if tagged, ok := ref.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + } + return &Schema1{ + Name: name, + Tag: tag, + Architecture: architecture, + FSLayers: fsLayers, + History: history, + SchemaVersion: 1, + } +} + +// Schema1Clone creates a copy of the supplied Schema1 manifest. 
+func Schema1Clone(src *Schema1) *Schema1 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema1) LayerInfos() []types.BlobInfo { + layers := make([]types.BlobInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + for i, info := range layerInfos { + // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + type imageV1 struct { + ID string + Parent string + } + // Per the specification, we can assume that len(m.FSLayers) == len(m.History) + imgs := make([]*imageV1, len(m.FSLayers)) + for i := range m.FSLayers { + img := &imageV1{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := validateV1ID(img.ID); err != nil { + return err + } + } + if imgs[len(imgs)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := 
len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: m.Tag, + Created: s1.Created, + DockerVersion: "", + Labels: make(map[string]string), + Architecture: "", + Os: "", + Layers: []string{}, + }, nil +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go new file mode 100644 index 00000000..a44e561b --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -0,0 +1,241 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// Schema2 is a manifest in docker/distribution schema 2. 
+type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. 
+type Schema2Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Schema2V1Image is a V1Image in docker/docker/image. 
+type Schema2V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Schema2Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Schema2Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. +type Schema2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// Schema2History stores build commands that were used to create an image, from docker/docker/image. 
+type Schema2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Schema2Image is an Image in docker/docker/image. +type Schema2Image struct { + Schema2V1Image + Parent digest.Digest `json:"parent,omitempty"` + RootFS *Schema2RootFS `json:"rootfs,omitempty"` + History []Schema2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID digest.Digest +} + +// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. +func Schema2FromManifest(manifest []byte) (*Schema2, error) { + s2 := Schema2{} + if err := json.Unmarshal(manifest, &s2); err != nil { + return nil, err + } + return &s2, nil +} + +// Schema2FromComponents creates an Schema2 manifest instance from the supplied data. +func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { + return &Schema2{ + SchemaVersion: 2, + MediaType: DockerV2Schema2MediaType, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +// Schema2Clone creates a copy of the supplied Schema2 manifest. 
+func Schema2Clone(src *Schema2) *Schema2 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema2) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, MediaType: DockerV2Schema2ConfigMediaType} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema2) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{ + Digest: layer.Digest, + Size: layer.Size, + URLs: layer.URLs, + MediaType: layer.MediaType, + }) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.LayersDescriptors) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + } + original := m.LayersDescriptors + m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.LayersDescriptors[i].MediaType = original[i].MediaType + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: "", + Created: s2.Created, + DockerVersion: s2.DockerVersion, + Labels: s2.Config.Labels, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: []string{}, + }, nil +} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index e329ee57..01f57a32 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -2,7 +2,9 @@ package manifest import ( "encoding/json" + "fmt" + "github.com/containers/image/types" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -38,6 +40,33 @@ var DefaultRequestedManifestMIMETypes = []string{ // DockerV2ListMediaType, // FIXME: Restore this ASAP } +// Manifest is an interface for parsing, modifying image manifests in isolation. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. 
+type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) +} + // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, // but we may not have such metadata available (e.g. when the manifest is a local file). @@ -142,3 +171,47 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { } return js.PrettySignature("signatures") } + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. 
+func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. 
See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + switch NormalizedMIMEType(mt) { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go new file mode 100644 index 00000000..18e27d23 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/oci.go @@ -0,0 +1,108 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// OCI1 is a manifest.Manifest implementation for OCI images. +// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. 
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations, MediaType: imgspecv1.MediaTypeImageConfig} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
+func (m *OCI1) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.Layers[i].MediaType = original[i].MediaType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + created := time.Time{} + if v1.Created != nil { + created = *v1.Created + } + return &types.ImageInspectInfo{ + Tag: "", + Created: created, + DockerVersion: "", + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: []string{}, + }, nil +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index 8644202f..fd437f5a 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -86,3 +86,7 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) { return s.unpackedSrc.GetSignatures(c) } + +func (s *ociArchiveImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index be8a2aa7..67f0c3b8 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -133,6 +133,11 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e return nil, 0, errWrap } +// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *ociImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} + func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 0117f2e0..794521b1 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -242,6 +242,10 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err return sigs, nil } +func (s *openshiftImageSource) UpdatedLayerInfos() []types.BlobInfo { + return nil +} + // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 08fa71b5..185e5f49 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -4,11 +4,13 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" - "time" - - "github.com/pkg/errors" + "os" + "path/filepath" + "strings" + "sync/atomic" "github.com/containers/image/image" "github.com/containers/image/manifest" @@ -16,10 +18,15 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" - ddigest "github.com/opencontainers/go-digest" + "github.com/docker/docker/api/types/versions" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be 
a tmpfs. + var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. @@ -27,8 +34,7 @@ var ( // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is - // called. + // ErrNoManifestLists is returned when GetTargetManifest() is called. ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. @@ -37,32 +43,23 @@ var ( type storageImageSource struct { imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + ID string + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - 
BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice -} - -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` + imageRef storageReference // The reference we'll use to name the image + publicRef storageReference // The reference we return when asked about the name we'll give to the image + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImage struct { @@ -70,19 +67,19 @@ type storageImage struct { size int64 } -// newImageSource sets us up to read out an image, which needs to already exist. +// newImageSource sets up an image for reading. func newImageSource(imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } + + // Build the reader object. 
image := &storageImageSource{ imageRef: imageRef, - Created: time.Now(), ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), + layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { @@ -91,202 +88,266 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) { return image, nil } -// newImageDestination sets us up to write a new image. +// Reference returns the image reference that we used to find this image. +func (s storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + +// Close cleans up any resources we tied up while reading the image. +func (s storageImageSource) Close() error { + return nil +} + +// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err +} + +// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + // If it's not a layer, then it must be a data item. 
+ if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { + if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "manifest") + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// UpdatedLayerInfos() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. 
+func (s *storageImageSource) UpdatedLayerInfos() []types.BlobInfo { + simg, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + logrus.Errorf("error reading image %q: %v", s.ID, err) + return nil + } + updatedBlobInfos := []types.BlobInfo{} + layerID := simg.TopLayer + _, manifestType, err := s.GetManifest() + if err != nil { + logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) + return nil + } + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) + return nil + } + if layer.UncompressedDigest == "" { + logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) + return nil + } + if layer.UncompressedSize < 0 { + logrus.Errorf("uncompressed size for layer %q is unknown", layerID) + return nil + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + layerID = layer.Parent + } + return updatedBlobInfos +} + +// GetTargetManifest() is not supported. +func (s *storageImageSource) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + return nil, "", ErrNoManifestLists +} + +// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
+func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { + var offset int + sigslice := [][]byte{} + signature := []byte{} + if len(s.SignatureSizes) > 0 { + signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") + if err != nil { + return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) + } + signature = signatureBlob + } + for _, length := range s.SignatureSizes { + sigslice = append(sigslice, signature[offset:offset+length]) + offset += length + } + if offset != len(signature) { + return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset) + } + return sigslice, nil +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { + directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage") + if err != nil { + return nil, errors.Wrapf(err, "error creating a temporary directory") + } + // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite + // schema1 image manifests to remove embedded references, since that changes the manifest's + // digest, and that makes the image unusable if we subsequently try to access it using a + // reference that mentions the no-longer-correct digest.
+ publicRef := imageRef + publicRef.name = nil image := &storageImageDestination{ imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), + publicRef: publicRef, + directory: directory, + blobDiffIDs: make(map[digest.Digest]digest.Digest), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), SignatureSizes: []int{}, } return image, nil } -func (s storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - +// Reference returns a mostly-usable image reference that can't return a DockerReference, to +// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to +// remove image names that they contain which don't match the value we're using. func (s storageImageDestination) Reference() types.ImageReference { - return s.imageRef + return s.publicRef } -func (s storageImageSource) Close() error { - return nil -} - -func (s storageImageDestination) Close() error { - return nil +// Close cleans up the temporary directory. +func (s *storageImageDestination) Close() error { + return os.RemoveAll(s.directory) } +// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed +// data when handing it to us. func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't + // bother compressing them before handing them to us, if they're not already compressed. return false } -// putBlob stores a layer or data blob, optionally enforcing that a digest in -// blobinfo matches the incoming data. 
-func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) { - blobSize := blobinfo.Size - digest := blobinfo.Digest +// PutBlob stores a layer or data blob in our temporary directory, checking that any information +// in the blobinfo matches the incoming data. +func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Try to read an initial snippet of the blob. - buf := [archive.HeaderSize]byte{} - n, err := io.ReadAtLeast(stream, buf[:], len(buf)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return errorBlobInfo, err - } - // Set up to read the whole blob (the initial snippet, plus the rest) - // while digesting it with either the default, or the passed-in digest, - // if one was specified. - hasher := ddigest.Canonical.Digester() - if digest.Validate() == nil { - if a := digest.Algorithm(); a.Available() { + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { hasher = a.Digester() } } - hash := "" + diffID := digest.Canonical.Digester() + filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() counter := ioutils.NewWriteCounter(hasher.Hash()) - defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) - multi := io.TeeReader(defragmented, counter) - if (n > 0) && archive.IsArchive(buf[:n]) { - // It's a filesystem layer. If it's not the first one in the - // image, we assume that the most recently added layer is its - // parent. 
- parentLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - parentLayer = layerList[len(layerList)-1] - } - } - // If we have an expected content digest, generate a layer ID - // based on the parent's ID and the expected content digest. - id := "" - if digest.Validate() == nil { - id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() - } - // Attempt to create the identified layer and import its contents. - layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - if errors.Cause(err) == storage.ErrDuplicateID { - // We specified an ID, and there's already a layer with - // the same ID. Drain the input so that we can look at - // its length and digest. - _, err := io.Copy(ioutil.Discard, multi) - if err != nil && err != io.EOF { - logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - hash = hasher.Digest().String() - } else { - // Applied the layer with the specified ID. Note the - // size info and computed digest. - hash = hasher.Digest().String() - layerMeta := storageLayerMetadata{ - Digest: hash, - CompressedSize: counter.Count, - Size: uncompressedSize, - } - if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { - s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) - } - // Hang on to the new layer's ID. - id = layer.ID - } - // Check if the size looks right. - if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size) - if layer != nil { - // Something's wrong; delete the newly-created layer. 
- s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobSizeMismatch - } - // If the content digest was specified, verify it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. 
- if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. + if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. 
+ s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count } return types.BlobInfo{ - Digest: digest, - Size: blobSize, + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, }, nil } -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be +// reapplied using ReapplyBlob. +// // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -294,93 +355,373 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } + if err := blobinfo.Digest.Validate(); err != nil { + return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. 
+ if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, size, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].UncompressedSize, nil + } + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].CompressedSize, nil + } + // Nope, we don't have it. return false, -1, nil } +// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the +// same one when it walks the list in the manifest. 
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() - if err != nil { - return types.BlobInfo{}, err + present, size, err := s.HasBlob(blobinfo) + if !present { + return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) - if err != nil { - return types.BlobInfo{}, err + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) + } + blobinfo.Size = size + return blobinfo, nil +} + +// computeID computes a recommended image ID based on information we have so far. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + mb, err := m.Serialize() + if err != nil { + return "" + } + switch manifest.GuessMIMEType(mb) { + case manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest: + // For Schema2 and OCI1(?), the ID is just the hex part of the digest of the config blob. + logrus.Debugf("trivial image ID for configured blob") + configInfo := m.ConfigInfo() + if configInfo.Digest.Validate() == nil { + return configInfo.Digest.Hex() } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + return "" + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. 
+ logrus.Debugf("computing image ID using compat data") + s1, ok := m.(*manifest.Schema1) + if !ok { + logrus.Debugf("schema type was guessed wrong?") + return "" + } + if len(s1.History) == 0 { + logrus.Debugf("image has no layers") + return "" + } + s2 := struct { + manifest.Schema2Image + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + ParentID string `json:"parent_id,omitempty"` + LayerID string `json:"layer_id,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + Size int64 `json:",omitempty"` + }{} + config := []byte(s1.History[0].V1Compatibility) + if json.Unmarshal(config, &s2) != nil { + logrus.Debugf("error decoding configuration") + return "" + } + // Images created with versions prior to 1.8.3 require us to rebuild the object. + if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { + err = json.Unmarshal(config, &s2) + if err != nil { + logrus.Infof("error decoding compat image config %s: %v", string(config), err) + return "" + } + config, err = json.Marshal(&s2) + if err != nil { + logrus.Infof("error re-encoding compat image config %#v: %v", s2, err) + return "" + } + } + // Build the history. + for _, h := range s1.History { + compat := manifest.Schema1V1Compatibility{} + if json.Unmarshal([]byte(h.V1Compatibility), &compat) != nil { + logrus.Debugf("error decoding history information") + return "" + } + hitem := manifest.Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + s2.History = append([]manifest.Schema2History{hitem}, s2.History...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. 
+ s2.RootFS = &manifest.Schema2RootFS{ + Type: "layers", + } + for _, fslayer := range s1.FSLayers { + blobSum := fslayer.BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for blob %q", string(blobSum)) + return "" + } + s2.RootFS.DiffIDs = append([]digest.Digest{diffID}, s2.RootFS.DiffIDs...) + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + logrus.Infof("error re-decoding compat image config %#v: %v", s2, err) + return "" + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(s2.RootFS) + if err != nil { + logrus.Infof("error encoding rootfs information %#v: %v", s2.RootFS, err) + return "" + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(s2.History) + if err != nil { + logrus.Infof("error encoding history information %#v: %v", s2.History, err) + return "" + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result, and take the digest of that result. 
+ config, err = json.Marshal(raw) + if err != nil { + logrus.Infof("error re-encoding compat image config %#v: %v", s2, err) + return "" + } + return digest.FromBytes(config).Hex() + case manifest.DockerV2ListMediaType: + logrus.Debugf("no image ID for manifest list") + // FIXME + case imgspecv1.MediaTypeImageIndex: + logrus.Debugf("no image ID for manifest index") + // FIXME + default: + logrus.Debugf("no image ID for unrecognized manifest type %q", manifest.GuessMIMEType(mb)) + // FIXME } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) - if err != nil { - return types.BlobInfo{}, err + return "" +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) } - return s.putBlob(rc, blobinfo, false) + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") } func (s *storageImageDestination) Commit() error { - // Create the image record. + // Find the list of layer blobs. 
+ if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + } + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "error parsing manifest") + } + layerBlobs := man.LayerInfos() + // Extract or find the layers. lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] + addedLayers := []string{} + for _, blob := range layerBlobs { + var diff io.ReadCloser + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + has, _, err := s.HasBlob(blob) + if err != nil { + return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + continue + } + // Check if we cached a file with that blobsum. If we didn't already have a layer with + // the blob's contents, we should have gotten a copy. 
+ if filename, ok := s.filenames[blob.Digest]; ok { + // Use the file's contents to initialize the layer. + file, err2 := os.Open(filename) + if err2 != nil { + return errors.Wrapf(err2, "error opening file %q", filename) + } + defer file.Close() + diff = file + } + if diff == nil { + // Try to find a layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Use the layer's contents to initialize the new layer. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + defer diff.Close() + } + if diff == nil { + // This shouldn't have happened. + return errors.Errorf("error applying blob %q: content not found", blob.Digest) + } + // Build the new layer using the diff, regardless of where it came from. + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + if err != nil { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + addedLayers = append([]string{lastLayer}, addedLayers...) + } + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. 
+ var options *storage.ImageOptions + if inspect, err := man.Inspect(s.getConfigBlob); err == nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options = &storage.ImageOptions{ + CreationDate: inspect.Created, } } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) + return errors.Wrapf(err, "error creating image %q", intendedID) } - img, err = s.imageRef.transport.store.Image(s.ID) + img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) + return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) + // Add the non-layer blobs as data items. 
Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} } - // We have names to set, so move those names to this image. - if len(names) > 0 { + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, verboseName(name)) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the data blobs to disk, and drop their contents from memory. 
- keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err - } - keys = append(keys, k) - } - for _, key := range keys { - delete(s.BlobData, key) - } - // Save the manifest, if we have one. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + // Save the manifest. + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "manifest", s.manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -388,12 +729,14 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err } // Save our metadata. 
metadata, err := json.Marshal(s) @@ -405,7 +748,7 @@ func (s *storageImageDestination) Commit() error { return err } if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -418,7 +761,7 @@ func (s *storageImageDestination) Commit() error { } var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here + imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, @@ -428,23 +771,20 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +// PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) + s.manifest = make([]byte, len(manifest)) + copy(s.manifest, manifest) return nil } -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). 
func (s *storageImageDestination) SupportsSignatures() error { return nil } -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false @@ -455,6 +795,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -465,139 +806,55 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.Signatures = sigblob + s.signatures = sigblob s.SignatureSizes = sizes return nil } -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // If the blob was "put" more than once, we have multiple layer IDs - // which should all produce the same diff. For the sake of tests that - // want to make sure we created different layers each time the blob was - // "put", though, cycle through the layers. 
- layerList := s.Layers[info.Digest] - position, ok := s.LayerPosition[info.Digest] - if !ok { - position = 0 - } - s.LayerPosition[info.Digest] = (position + 1) % len(layerList) - logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) - rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) - return rc, n, layerList[position], err -} - -func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { - layer, err := store.Layer(layerID) - if err != nil { - return nil, -1, err - } - layerMeta := storageLayerMetadata{ - CompressedSize: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.CompressedSize <= 0 { - n = -1 - } else { - n = layerMeta.CompressedSize - } - diff, err := store.Diff("", layer.ID, nil) - if err != nil { - return nil, -1, err - } - return diff, n, nil -} - -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - return manifestBlob, manifest.GuessMIMEType(manifestBlob), err -} - -func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { - var offset int - signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, err - } - sigslice := [][]byte{} - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil -} 
- +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. func (s *storageImageSource) getSize() (int64, error) { var sum int64 - names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) + // Size up the data blobs. + dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) + return -1, errors.Wrapf(err, "error reading image %q", s.ID) } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) } sum += bigSize } + // Add the signature sizes. for _, sigSize := range s.SignatureSizes { sum += int64(sigSize) } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size + // Prepare to walk the layer list. + img, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image info %q", s.ID) + } + // Walk the layer list. 
+ layerID := img.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent } return sum, nil } -func (s *storageImage) Size() (int64, error) { - return s.size, nil -} - // newImage creates an image that also knows its size func newImage(s storageReference) (types.Image, error) { src, err := newImageSource(s) @@ -614,3 +871,8 @@ func newImage(s storageReference) (types.Image, error) { } return &storageImage{Image: img, size: size}, nil } + +// Size() returns the previously-computed size of the image, with no error. +func (s storageImage) Size() (int64, error) { + return s.size, nil +} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index ded58705..fb8b0ccc 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -6,6 +6,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,9 +19,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // 
transport that we'll return from Transport() won't be affected by @@ -30,6 +33,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -76,8 +81,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +109,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +138,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. 
- namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] } diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 1a0ebd04..39c81a58 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -11,11 +11,14 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) +const ( + minimumTruncatedIDLength = 3 +) + func init() { transports.Register(Transport) } @@ -101,60 +104,124 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named - var sum digest.Digest - var err error if ref == "" { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name.
- name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err + + // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + split := strings.LastIndex(ref, "@") + idOrDigest := "" + if split != -1 { + // Peel off that last bit so that we can work on the rest. + idOrDigest = ref[split+1:] + if idOrDigest == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. - if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } - } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } - } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference + ref = ref[:split] } + + // The middle segment (now the last segment), if there is one, is a digest. + split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + ref = ref[:split] + } + + // If we have something that unambiguously should be a digest, validate it, and then the third part, + // if we have one, as an ID. 
+ id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + } + } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(ref); err == nil && img != nil { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it.
+ id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. + if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) @@ -163,7 +230,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( } else { logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +249,14 @@ 
func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. If there is, then it @@ -335,7 +405,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +415,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) - tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() +func verboseName(r reference.Reference) string { + if r == nil { + return "" } - return name.Name() + tag + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" + tag := "" + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum 
= "@" + digested.Digest().String() + } + } + return name + tag + sum } diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 4ede907b..bae7319a 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -96,6 +96,7 @@ type BlobInfo struct { Size int64 // -1 if unknown URLs []string Annotations map[string]string + MediaType string } // ImageSource is a service, possibly remote (= slow), to download components of a single image. @@ -118,10 +119,14 @@ type ImageSource interface { // out of a manifest list. GetTargetManifest(digest digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. GetBlob(BlobInfo) (io.ReadCloser, int64, error) // GetSignatures returns the image's signatures. It may use a remote (= slow) service. GetSignatures(context.Context) ([][]byte, error) + // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + UpdatedLayerInfos() []BlobInfo } // ImageDestination is a service, possibly remote (= slow), to store components of a single image. @@ -153,9 +158,10 @@ type ImageDestination interface { AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result (with all data filled in). 
+ // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. @@ -205,6 +211,10 @@ type UnparsedImage interface { Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) + // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + UpdatedLayerInfos() []BlobInfo } // Image is the primary API for inspecting properties of images. @@ -215,7 +225,7 @@ type Image interface { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. 
ConfigBlob() ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -223,7 +233,7 @@ type Image interface { // old image manifests work (docker v2s1 especially). OCIConfig() (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -249,7 +259,7 @@ type Image interface { // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. 
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index d5bae3b0..3263f580 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165 +github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/pquerna/ffjson master diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 249f98bc..ae601f43 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -650,10 +650,21 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } + if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { + // If no lower, we used the diff directory, so no work to do + if os.IsNotExist(err) { + return nil + } + return err + } if err := 
unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index c84bfaf9..f7e3dc34 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -1888,10 +1888,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } storeLayers, err := m(store, d) if err != nil { - return nil, err + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue } layers = append(layers, storeLayers...) } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } return layers, nil }