2017-07-23 23:01:37 +00:00
|
|
|
package image
|
2017-06-16 17:24:00 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/json"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/Sirupsen/logrus"
|
|
|
|
"github.com/containers/image/docker/reference"
|
|
|
|
"github.com/containers/image/image"
|
|
|
|
is "github.com/containers/image/storage"
|
|
|
|
"github.com/containers/image/types"
|
|
|
|
"github.com/containers/storage"
|
|
|
|
"github.com/containers/storage/pkg/archive"
|
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
|
|
|
"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker"
|
|
|
|
digest "github.com/opencontainers/go-digest"
|
|
|
|
specs "github.com/opencontainers/image-spec/specs-go"
|
|
|
|
"github.com/opencontainers/image-spec/specs-go/v1"
|
|
|
|
"github.com/pkg/errors"
|
|
|
|
)
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// CopyRef handles image references used for copying images to/from remotes
type CopyRef struct {
	store                 storage.Store       // storage backend the image's layers are read from
	compression           archive.Compression // compression to apply when exporting layer blobs
	name                  reference.Named     // docker reference; returned by DockerReference
	names                 []string            // names[0] is what StringWithinTransport reports
	layerID               string              // topmost layer; its parent chain yields the full layer list
	addHistory            bool                // whether NewImageSource appends a history entry to the configs
	oconfig               []byte              // OCI image configuration, JSON-encoded (unmarshaled into v1.Image)
	dconfig               []byte              // Docker v2s2 image configuration, JSON-encoded (unmarshaled into docker.V2Image)
	created               time.Time           // Created timestamp for the appended history entry
	createdBy             string              // CreatedBy for the appended history entry
	annotations           map[string]string   // annotations copied into the OCI manifest
	preferredManifestType string              // preferred output manifest MIME type (see selectManifestType)
	exporting             bool                // when false, layer blobs are not extracted; fake digests are used
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// copySource is the types.ImageSource returned by CopyRef.NewImageSource.  It
// serves the staged manifest, configuration, and layer blobs from a temporary
// directory, and removes that directory when it is closed.
type copySource struct {
	path         string              // temporary directory holding layer blob files, named by digest
	ref          *CopyRef            // the reference this source was built from
	store        storage.Store
	layerID      string
	names        []string
	addHistory   bool
	compression  archive.Compression
	config       []byte        // configuration blob; served by GetBlob
	configDigest digest.Digest // digest of config; GetBlob compares requested digests against it
	manifest     []byte        // manifest blob; served by GetManifest
	manifestType string        // MIME type of manifest
	exporting    bool
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// NewImage creates a new image from the given system context
|
|
|
|
func (c *CopyRef) NewImage(sc *types.SystemContext) (types.Image, error) {
|
|
|
|
src, err := c.NewImageSource(sc, nil)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return image.FromSource(src)
|
|
|
|
}
|
|
|
|
|
|
|
|
// selectManifestType picks a manifest MIME type.  If preferred itself appears
// in acceptable, it wins immediately; otherwise the result is the last entry
// of acceptable that is also in supported, falling back to preferred when no
// acceptable entry is supported.
func selectManifestType(preferred string, acceptable, supported []string) string {
	chosen := preferred
	for _, candidate := range acceptable {
		if candidate == preferred {
			return preferred
		}
		for _, known := range supported {
			if candidate == known {
				chosen = candidate
			}
		}
	}
	return chosen
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// NewImageSource creates a new image source from the given system context and manifest
|
|
|
|
func (c *CopyRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
|
2017-06-16 17:24:00 +00:00
|
|
|
// Decide which type of manifest and configuration output we're going to provide.
|
|
|
|
supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest}
|
2017-07-23 23:01:37 +00:00
|
|
|
manifestType := selectManifestType(c.preferredManifestType, manifestTypes, supportedManifestTypes)
|
2017-06-16 17:24:00 +00:00
|
|
|
// If it's not a format we support, return an error.
|
|
|
|
if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
|
|
|
|
return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
|
|
|
|
manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest)
|
|
|
|
}
|
|
|
|
// Start building the list of layers using the read-write layer.
|
|
|
|
layers := []string{}
|
2017-07-23 23:01:37 +00:00
|
|
|
layerID := c.layerID
|
|
|
|
layer, err := c.store.Layer(layerID)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
|
|
|
|
}
|
|
|
|
// Walk the list of parent layers, prepending each as we go.
|
|
|
|
for layer != nil {
|
|
|
|
layers = append(append([]string{}, layerID), layers...)
|
|
|
|
layerID = layer.Parent
|
|
|
|
if layerID == "" {
|
|
|
|
err = nil
|
|
|
|
break
|
|
|
|
}
|
2017-07-23 23:01:37 +00:00
|
|
|
layer, err = c.store.Layer(layerID)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
logrus.Debugf("layer list: %q", layers)
|
|
|
|
|
|
|
|
// Make a temporary directory to hold blobs.
|
|
|
|
path, err := ioutil.TempDir(os.TempDir(), "kpod")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
logrus.Debugf("using %q to hold temporary data", path)
|
|
|
|
defer func() {
|
|
|
|
if src == nil {
|
|
|
|
err2 := os.RemoveAll(path)
|
|
|
|
if err2 != nil {
|
|
|
|
logrus.Errorf("error removing %q: %v", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Build fresh copies of the configurations so that we don't mess with the values in the Builder
|
|
|
|
// object itself.
|
|
|
|
oimage := v1.Image{}
|
2017-07-23 23:01:37 +00:00
|
|
|
err = json.Unmarshal(c.oconfig, &oimage)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
dimage := docker.V2Image{}
|
2017-07-23 23:01:37 +00:00
|
|
|
err = json.Unmarshal(c.dconfig, &dimage)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start building manifests.
|
|
|
|
omanifest := v1.Manifest{
|
|
|
|
Versioned: specs.Versioned{
|
|
|
|
SchemaVersion: 2,
|
|
|
|
},
|
|
|
|
Config: v1.Descriptor{
|
|
|
|
MediaType: v1.MediaTypeImageConfig,
|
|
|
|
},
|
|
|
|
Layers: []v1.Descriptor{},
|
2017-07-23 23:01:37 +00:00
|
|
|
Annotations: c.annotations,
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
dmanifest := docker.V2S2Manifest{
|
|
|
|
V2Versioned: docker.V2Versioned{
|
|
|
|
SchemaVersion: 2,
|
|
|
|
MediaType: docker.V2S2MediaTypeManifest,
|
|
|
|
},
|
|
|
|
Config: docker.V2S2Descriptor{
|
|
|
|
MediaType: docker.V2S2MediaTypeImageConfig,
|
|
|
|
},
|
|
|
|
Layers: []docker.V2S2Descriptor{},
|
|
|
|
}
|
|
|
|
|
|
|
|
oimage.RootFS.Type = docker.TypeLayers
|
|
|
|
oimage.RootFS.DiffIDs = []digest.Digest{}
|
|
|
|
dimage.RootFS = &docker.V2S2RootFS{}
|
|
|
|
dimage.RootFS.Type = docker.TypeLayers
|
|
|
|
dimage.RootFS.DiffIDs = []digest.Digest{}
|
|
|
|
|
|
|
|
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
|
|
|
|
for _, layerID := range layers {
|
|
|
|
omediaType := v1.MediaTypeImageLayer
|
|
|
|
dmediaType := docker.V2S2MediaTypeUncompressedLayer
|
|
|
|
// Figure out which media type we want to call this. Assume no compression.
|
2017-07-23 23:01:37 +00:00
|
|
|
if c.compression != archive.Uncompressed {
|
|
|
|
switch c.compression {
|
2017-06-16 17:24:00 +00:00
|
|
|
case archive.Gzip:
|
|
|
|
omediaType = v1.MediaTypeImageLayerGzip
|
|
|
|
dmediaType = docker.V2S2MediaTypeLayer
|
|
|
|
logrus.Debugf("compressing layer %q with gzip", layerID)
|
|
|
|
case archive.Bzip2:
|
|
|
|
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
|
|
|
// how to decompress them, we can't try to compress layers with bzip2.
|
|
|
|
return nil, errors.New("media type for bzip2-compressed layers is not defined")
|
|
|
|
default:
|
|
|
|
logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
|
2017-07-23 23:01:37 +00:00
|
|
|
if !c.exporting {
|
2017-06-16 17:24:00 +00:00
|
|
|
fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
|
|
|
|
// Add a note in the manifest about the layer. The blobs should be identified by their
|
|
|
|
// possibly-compressed blob digests, but just use the layer IDs here.
|
|
|
|
olayerDescriptor := v1.Descriptor{
|
|
|
|
MediaType: omediaType,
|
|
|
|
Digest: fakeLayerDigest,
|
|
|
|
Size: -1,
|
|
|
|
}
|
|
|
|
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
|
|
|
dlayerDescriptor := docker.V2S2Descriptor{
|
|
|
|
MediaType: dmediaType,
|
|
|
|
Digest: fakeLayerDigest,
|
|
|
|
Size: -1,
|
|
|
|
}
|
|
|
|
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
|
|
|
// Add a note about the diffID, which should be uncompressed digest of the blob, but
|
|
|
|
// just use the layer ID here.
|
|
|
|
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest)
|
|
|
|
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Start reading the layer.
|
2017-07-24 13:21:50 +00:00
|
|
|
rc, err := c.store.Diff("", layerID, nil)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
|
|
|
|
}
|
|
|
|
defer rc.Close()
|
|
|
|
// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
|
|
|
|
// differences, the result may not match the digest the blob had when it was originally imported,
|
|
|
|
// so we have to recompute all of this anyway if we want to be sure the digests we use will be
|
|
|
|
// correct.
|
|
|
|
uncompressed, err := archive.DecompressStream(rc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
|
|
|
|
}
|
|
|
|
defer uncompressed.Close()
|
|
|
|
srcHasher := digest.Canonical.Digester()
|
|
|
|
reader := io.TeeReader(uncompressed, srcHasher.Hash())
|
|
|
|
// Set up to write the possibly-recompressed blob.
|
|
|
|
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
|
|
|
|
}
|
|
|
|
destHasher := digest.Canonical.Digester()
|
|
|
|
counter := ioutils.NewWriteCounter(layerFile)
|
|
|
|
multiWriter := io.MultiWriter(counter, destHasher.Hash())
|
|
|
|
// Compress the layer, if we're compressing it.
|
2017-07-23 23:01:37 +00:00
|
|
|
writer, err := archive.CompressStream(multiWriter, c.compression)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
|
|
|
|
}
|
|
|
|
size, err := io.Copy(writer, reader)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
|
|
|
|
}
|
|
|
|
writer.Close()
|
|
|
|
layerFile.Close()
|
2017-07-23 23:01:37 +00:00
|
|
|
if c.compression == archive.Uncompressed {
|
2017-06-16 17:24:00 +00:00
|
|
|
if size != counter.Count {
|
|
|
|
return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
size = counter.Count
|
|
|
|
}
|
|
|
|
logrus.Debugf("layer %q size is %d bytes", layerID, size)
|
|
|
|
// Rename the layer so that we can more easily find it by digest later.
|
|
|
|
err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
|
|
|
|
}
|
|
|
|
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
|
|
|
|
// compressed blob digests.
|
|
|
|
olayerDescriptor := v1.Descriptor{
|
|
|
|
MediaType: omediaType,
|
|
|
|
Digest: destHasher.Digest(),
|
|
|
|
Size: size,
|
|
|
|
}
|
|
|
|
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
|
|
|
dlayerDescriptor := docker.V2S2Descriptor{
|
|
|
|
MediaType: dmediaType,
|
|
|
|
Digest: destHasher.Digest(),
|
|
|
|
Size: size,
|
|
|
|
}
|
|
|
|
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
|
|
|
// Add a note about the diffID, which is always an uncompressed value.
|
|
|
|
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
|
|
|
|
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
if c.addHistory {
|
2017-06-16 17:24:00 +00:00
|
|
|
// Build history notes in the image configurations.
|
|
|
|
onews := v1.History{
|
2017-07-23 23:01:37 +00:00
|
|
|
Created: &c.created,
|
|
|
|
CreatedBy: c.createdBy,
|
2017-06-16 17:24:00 +00:00
|
|
|
Author: oimage.Author,
|
|
|
|
EmptyLayer: false,
|
|
|
|
}
|
|
|
|
oimage.History = append(oimage.History, onews)
|
|
|
|
dnews := docker.V2S2History{
|
2017-07-23 23:01:37 +00:00
|
|
|
Created: c.created,
|
|
|
|
CreatedBy: c.createdBy,
|
2017-06-16 17:24:00 +00:00
|
|
|
Author: dimage.Author,
|
|
|
|
EmptyLayer: false,
|
|
|
|
}
|
|
|
|
dimage.History = append(dimage.History, dnews)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Encode the image configuration blob.
|
|
|
|
oconfig, err := json.Marshal(&oimage)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
logrus.Debugf("OCIv1 config = %s", oconfig)
|
|
|
|
|
|
|
|
// Add the configuration blob to the manifest.
|
|
|
|
omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
|
|
|
|
omanifest.Config.Size = int64(len(oconfig))
|
|
|
|
omanifest.Config.MediaType = v1.MediaTypeImageConfig
|
|
|
|
|
|
|
|
// Encode the manifest.
|
|
|
|
omanifestbytes, err := json.Marshal(&omanifest)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
|
|
|
|
|
|
|
|
// Encode the image configuration blob.
|
|
|
|
dconfig, err := json.Marshal(&dimage)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
logrus.Debugf("Docker v2s2 config = %s", dconfig)
|
|
|
|
|
|
|
|
// Add the configuration blob to the manifest.
|
|
|
|
dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
|
|
|
|
dmanifest.Config.Size = int64(len(dconfig))
|
|
|
|
dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig
|
|
|
|
|
|
|
|
// Encode the manifest.
|
|
|
|
dmanifestbytes, err := json.Marshal(&dmanifest)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
|
|
|
|
|
|
|
|
// Decide which manifest and configuration blobs we'll actually output.
|
|
|
|
var config []byte
|
|
|
|
var manifest []byte
|
|
|
|
switch manifestType {
|
|
|
|
case v1.MediaTypeImageManifest:
|
|
|
|
manifest = omanifestbytes
|
|
|
|
config = oconfig
|
|
|
|
case docker.V2S2MediaTypeManifest:
|
|
|
|
manifest = dmanifestbytes
|
|
|
|
config = dconfig
|
|
|
|
default:
|
|
|
|
panic("unreachable code: unsupported manifest type")
|
|
|
|
}
|
2017-07-23 23:01:37 +00:00
|
|
|
src = ©Source{
|
2017-06-16 17:24:00 +00:00
|
|
|
path: path,
|
2017-07-23 23:01:37 +00:00
|
|
|
ref: c,
|
|
|
|
store: c.store,
|
|
|
|
layerID: c.layerID,
|
|
|
|
names: c.names,
|
|
|
|
addHistory: c.addHistory,
|
|
|
|
compression: c.compression,
|
2017-06-16 17:24:00 +00:00
|
|
|
config: config,
|
|
|
|
configDigest: digest.Canonical.FromBytes(config),
|
|
|
|
manifest: manifest,
|
|
|
|
manifestType: manifestType,
|
2017-07-23 23:01:37 +00:00
|
|
|
exporting: c.exporting,
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
return src, nil
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// NewImageDestination creates a new image destination from the given system context
|
|
|
|
func (c *CopyRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) {
|
2017-06-16 17:24:00 +00:00
|
|
|
return nil, errors.Errorf("can't write to a container")
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// DockerReference gets the docker reference for the given CopyRef
|
|
|
|
func (c *CopyRef) DockerReference() reference.Named {
|
|
|
|
return c.name
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// StringWithinTransport returns the first name of the copyRef
|
|
|
|
func (c *CopyRef) StringWithinTransport() string {
|
|
|
|
if len(c.names) > 0 {
|
|
|
|
return c.names[0]
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// DeleteImage deletes an image in the CopyRef
|
|
|
|
func (c *CopyRef) DeleteImage(*types.SystemContext) error {
|
2017-06-16 17:24:00 +00:00
|
|
|
// we were never here
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// PolicyConfigurationIdentity returns the policy configuration for the CopyRef
|
|
|
|
func (c *CopyRef) PolicyConfigurationIdentity() string {
|
2017-06-16 17:24:00 +00:00
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// PolicyConfigurationNamespaces returns the policy configuration namespace for the CopyRef
|
|
|
|
func (c *CopyRef) PolicyConfigurationNamespaces() []string {
|
2017-06-16 17:24:00 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
// Transport returns an ImageTransport for the given CopyRef
|
|
|
|
func (c *CopyRef) Transport() types.ImageTransport {
|
2017-06-16 17:24:00 +00:00
|
|
|
return is.Transport
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) Close() error {
|
|
|
|
err := os.RemoveAll(cs.path)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
2017-07-23 23:01:37 +00:00
|
|
|
logrus.Errorf("error removing %q: %v", cs.path, err)
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) Reference() types.ImageReference {
|
|
|
|
return cs.ref
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) GetSignatures() ([][]byte, error) {
|
2017-06-16 17:24:00 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
|
2017-06-16 17:24:00 +00:00
|
|
|
return []byte{}, "", errors.Errorf("TODO")
|
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) GetManifest() ([]byte, string, error) {
|
|
|
|
return cs.manifest, cs.manifestType, nil
|
2017-06-16 17:24:00 +00:00
|
|
|
}
|
|
|
|
|
2017-07-23 23:01:37 +00:00
|
|
|
func (cs *copySource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
|
|
|
|
if blob.Digest == cs.configDigest {
|
2017-06-16 17:24:00 +00:00
|
|
|
logrus.Debugf("start reading config")
|
2017-07-23 23:01:37 +00:00
|
|
|
reader := bytes.NewReader(cs.config)
|
2017-06-16 17:24:00 +00:00
|
|
|
closer := func() error {
|
|
|
|
logrus.Debugf("finished reading config")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
|
|
|
|
}
|
2017-07-23 23:01:37 +00:00
|
|
|
layerFile, err := os.OpenFile(filepath.Join(cs.path, blob.Digest.String()), os.O_RDONLY, 0600)
|
2017-06-16 17:24:00 +00:00
|
|
|
if err != nil {
|
|
|
|
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
|
|
|
|
return nil, -1, err
|
|
|
|
}
|
|
|
|
size = -1
|
|
|
|
st, err := layerFile.Stat()
|
|
|
|
if err != nil {
|
|
|
|
logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err)
|
|
|
|
} else {
|
|
|
|
size = st.Size()
|
|
|
|
}
|
|
|
|
logrus.Debugf("reading layer %q", blob.Digest.String())
|
|
|
|
closer := func() error {
|
|
|
|
layerFile.Close()
|
|
|
|
logrus.Debugf("finished reading layer %q", blob.Digest.String())
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
|
|
|
|
}
|