Build and install from GOPATH

* Rename 'vendor/src' -> 'vendor'
* Ignore vendor/ instead of vendor/src/ for lint
* Rename 'cmd/client' -> 'cmd/ocic' to make it 'go install'able
* Rename 'cmd/server' -> 'cmd/ocid' to make it 'go install'able
* Update Makefile to build and install from GOPATH
* Update tests to locate ocid/ocic in GOPATH/bin
* Search for binaries in GOPATH/bin instead of PATH
* Install tools using `go get -u`, so they are updated on each run

Signed-off-by: Jonathan Yu <jawnsy@redhat.com>

This commit is contained in:
parent 9da2882d49
commit 6c9628cdb1

1111 changed files with 70 additions and 61 deletions
570 vendor/github.com/containers/image/storage/storage_image.go (generated, vendored, new file)

@@ -0,0 +1,570 @@
package storage

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/image"
	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/ioutils"
	"github.com/containers/storage/storage"
	ddigest "github.com/docker/distribution/digest"
)

var (
	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
	// with a digest-based name that doesn't match its contents.
	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
	// with an expected size that doesn't match the reader.
	ErrBlobSizeMismatch = errors.New("blob size mismatch")
	// ErrNoManifestLists is returned when GetTargetManifest() is
	// called.
	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
	// ErrNoSuchImage is returned when we attempt to access an image which
	// doesn't exist in the storage area.
	ErrNoSuchImage = storage.ErrNotAnImage
)

type storageImageSource struct {
	imageRef       storageReference
	Tag            string                      `json:"tag,omitempty"`
	Created        time.Time                   `json:"created-time,omitempty"`
	ID             string                      `json:"id"`
	BlobList       []types.BlobInfo            `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
	Layers         map[ddigest.Digest][]string `json:"layers,omitempty"`    // Map from digests of blobs to lists of layer IDs
	LayerPosition  map[ddigest.Digest]int      `json:"-"`                   // Where we are in reading a blob's layers
	SignatureSizes []int                       `json:"signature-sizes"`     // List of sizes of each signature slice
}

type storageImageDestination struct {
	imageRef       storageReference
	Tag            string                      `json:"tag,omitempty"`
	Created        time.Time                   `json:"created-time,omitempty"`
	ID             string                      `json:"id"`
	BlobList       []types.BlobInfo            `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
	Layers         map[ddigest.Digest][]string `json:"layers,omitempty"`    // Map from digests of blobs to lists of layer IDs
	BlobData       map[ddigest.Digest][]byte   `json:"-"`                   // Map from names of blobs that aren't layers to contents, temporary
	Manifest       []byte                      `json:"-"`                   // Manifest contents, temporary
	Signatures     []byte                      `json:"-"`                   // Signature contents, temporary
	SignatureSizes []int                       `json:"signature-sizes"`     // List of sizes of each signature slice
}

type storageLayerMetadata struct {
	Digest         string `json:"digest,omitempty"`
	Size           int64  `json:"size"`
	CompressedSize int64  `json:"compressed-size,omitempty"`
}

type storageImage struct {
	types.Image
	size int64
}

// newImageSource sets us up to read out an image, which needs to already exist.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
	id := imageRef.resolveID()
	if id == "" {
		logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport())
		return nil, ErrNoSuchImage
	}
	img, err := imageRef.transport.store.GetImage(id)
	if err != nil {
		return nil, fmt.Errorf("error reading image %q: %v", id, err)
	}
	image := &storageImageSource{
		imageRef:       imageRef,
		Created:        time.Now(),
		ID:             img.ID,
		BlobList:       []types.BlobInfo{},
		Layers:         make(map[ddigest.Digest][]string),
		LayerPosition:  make(map[ddigest.Digest]int),
		SignatureSizes: []int{},
	}
	if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
		return nil, fmt.Errorf("error decoding metadata for source image: %v", err)
	}
	return image, nil
}

// newImageDestination sets us up to write a new image.
func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
	image := &storageImageDestination{
		imageRef:       imageRef,
		Tag:            imageRef.reference,
		Created:        time.Now(),
		ID:             imageRef.id,
		BlobList:       []types.BlobInfo{},
		Layers:         make(map[ddigest.Digest][]string),
		BlobData:       make(map[ddigest.Digest][]byte),
		SignatureSizes: []int{},
	}
	return image, nil
}

func (s storageImageSource) Reference() types.ImageReference {
	return s.imageRef
}

func (s storageImageDestination) Reference() types.ImageReference {
	return s.imageRef
}

func (s storageImageSource) Close() {
}

func (s storageImageDestination) Close() {
}

func (s storageImageDestination) ShouldCompressLayers() bool {
	// We ultimately have to decompress layers to populate trees on disk,
	// so callers shouldn't bother compressing them before handing them to
	// us, if they're not already compressed.
	return false
}

// PutBlob is used to both store filesystem layers and binary data that is part
// of the image. Filesystem layers are assumed to be imported in order, as
// that is required by some of the underlying storage drivers.
func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
	blobSize := int64(-1)
	digest := blobinfo.Digest
	errorBlobInfo := types.BlobInfo{
		Digest: "",
		Size:   -1,
	}
	// Try to read an initial snippet of the blob.
	header := make([]byte, 10240)
	n, err := stream.Read(header)
	if err != nil && err != io.EOF {
		return errorBlobInfo, err
	}
	// Set up to read the whole blob (the initial snippet, plus the rest)
	// while digesting it with either the default, or the passed-in digest,
	// if one was specified.
	hasher := ddigest.Canonical.New()
	if digest.Validate() == nil {
		if a := digest.Algorithm(); a.Available() {
			hasher = a.New()
		}
	}
	hash := ""
	counter := ioutils.NewWriteCounter(hasher.Hash())
	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
	multi := io.TeeReader(defragmented, counter)
	if (n > 0) && archive.IsArchive(header[:n]) {
		// It's a filesystem layer. If it's not the first one in the
		// image, we assume that the most recently added layer is its
		// parent.
		parentLayer := ""
		for _, blob := range s.BlobList {
			if layerList, ok := s.Layers[blob.Digest]; ok {
				parentLayer = layerList[len(layerList)-1]
			}
		}
		// If we have an expected content digest, generate a layer ID
		// based on the parent's ID and the expected content digest.
		id := ""
		if digest.Validate() == nil {
			id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
		}
		// Attempt to create the identified layer and import its contents.
		layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
		if err != nil && err != storage.ErrDuplicateID {
			logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
			return errorBlobInfo, err
		}
		if err == storage.ErrDuplicateID {
			// We specified an ID, and there's already a layer with
			// the same ID. Drain the input so that we can look at
			// its length and digest.
			_, err := io.Copy(ioutil.Discard, multi)
			if err != nil && err != io.EOF {
logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err)
				return errorBlobInfo, err
			}
			hash = hasher.Digest().String()
		} else {
			// Applied the layer with the specified ID. Note the
			// size info and computed digest.
			hash = hasher.Digest().String()
			layerMeta := storageLayerMetadata{
				Digest:         hash,
				CompressedSize: counter.Count,
				Size:           uncompressedSize,
			}
			if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
				s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
			}
			// Hang on to the new layer's ID.
			id = layer.ID
		}
		blobSize = counter.Count
		// Check if the size looks right.
		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
			if layer != nil {
				// Something's wrong; delete the newly-created layer.
				s.imageRef.transport.store.DeleteLayer(layer.ID)
			}
			return errorBlobInfo, ErrBlobSizeMismatch
		}
		// If the content digest was specified, verify it.
		if digest.Validate() == nil && digest.String() != hash {
			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
			if layer != nil {
				// Something's wrong; delete the newly-created layer.
				s.imageRef.transport.store.DeleteLayer(layer.ID)
			}
			return errorBlobInfo, ErrBlobDigestMismatch
		}
		// If we didn't get a digest, construct one.
		if digest == "" {
			digest = ddigest.Digest(hash)
		}
		// Record that this layer blob is a layer, and the layer ID it
		// ended up having. This is a list, in case the same blob is
		// being applied more than once.
		s.Layers[digest] = append(s.Layers[digest], id)
		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
		if layer != nil {
			logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
		} else {
			logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
		}
	} else {
		// It's just data. Finish scanning it in, check that our
		// computed digest matches the passed-in digest, and store it,
		// but leave it out of the blob-to-layer-ID map so that we can
		// tell that it's not a layer.
		blob, err := ioutil.ReadAll(multi)
		if err != nil && err != io.EOF {
			return errorBlobInfo, err
		}
		blobSize = int64(len(blob))
		hash = hasher.Digest().String()
		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
			return errorBlobInfo, ErrBlobSizeMismatch
		}
		// If we were given a digest, verify that the content matches
		// it.
		if digest.Validate() == nil && digest.String() != hash {
			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
			return errorBlobInfo, ErrBlobDigestMismatch
		}
		// If we didn't get a digest, construct one.
		if digest == "" {
			digest = ddigest.Digest(hash)
		}
		// Save the blob for when we Commit().
		s.BlobData[digest] = blob
		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
		logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
	}
	return types.BlobInfo{
		Digest: digest,
		Size:   blobSize,
	}, nil
}
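
A note on the layer IDs PutBlob creates: when the expected digest is known, the ID is derived deterministically from the parent layer's ID plus that digest, which is why re-pushing the same blob onto the same parent hits storage.ErrDuplicateID and gets deduplicated above. A minimal sketch of just that derivation, using the same digest package as this file (the parent and digest values here are made up):

package main

import (
	"fmt"

	ddigest "github.com/docker/distribution/digest"
)

func main() {
	// Hypothetical inputs: no parent (first layer) and an expected blob digest.
	parentLayer := ""
	blobDigest := "sha256:4bc453b53cb3d914b45f4b250294236adba2c0e09ff6f03793949e7e39fd4cc1"
	// Same scheme as PutBlob above: hash "<parent>+<digest>" and keep the hex part.
	id := ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + blobDigest)).Hex()
	fmt.Println(id) // stable for a given (parent, digest) pair
}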

func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
	if blobinfo.Digest == "" {
		return false, -1, fmt.Errorf("cannot check for a blob with unknown digest")
	}
	for _, blob := range s.BlobList {
		if blob.Digest == blobinfo.Digest {
			return true, blob.Size, nil
		}
	}
	return false, -1, types.ErrBlobNotFound
}

func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
	err := blobinfo.Digest.Validate()
	if err != nil {
		return types.BlobInfo{}, err
	}
	if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
		b, err := s.imageRef.transport.store.GetImageBigData(s.ID, blobinfo.Digest.String())
		if err != nil {
			return types.BlobInfo{}, err
		}
		return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
	}
	layerList := s.Layers[blobinfo.Digest]
	rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
	if err != nil {
		return types.BlobInfo{}, err
	}
	return s.PutBlob(rc, blobinfo)
}

func (s *storageImageDestination) Commit() error {
	// Create the image record.
	lastLayer := ""
	for _, blob := range s.BlobList {
		if layerList, ok := s.Layers[blob.Digest]; ok {
			lastLayer = layerList[len(layerList)-1]
		}
	}
	img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
	if err != nil {
		logrus.Debugf("error creating image: %q", err)
		return err
	}
	logrus.Debugf("created new image ID %q", img.ID)
	s.ID = img.ID
	if s.Tag != "" {
		// We have a name to set, so move the name to this image.
		if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil {
			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
			}
			logrus.Debugf("error setting names on image %q: %v", img.ID, err)
			return err
		}
		logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
	}
	// Save the data blobs to disk, and drop their contents from memory.
	keys := []ddigest.Digest{}
	for k, v := range s.BlobData {
		if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
			}
			logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
			return err
		}
		keys = append(keys, k)
	}
	for _, key := range keys {
		delete(s.BlobData, key)
	}
	// Save the manifest, if we have one.
	if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil {
		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
		}
		logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
		return err
	}
	// Save the signatures, if we have any.
	if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil {
		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
		}
		logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
		return err
	}
	// Save our metadata.
	metadata, err := json.Marshal(s)
	if err != nil {
		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
		}
		logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
		return err
	}
	if len(metadata) != 0 {
		if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil {
			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
			}
			logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
			return err
		}
		logrus.Debugf("saved image metadata %q", string(metadata))
	}
	return nil
}

func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
	return nil
}

func (s *storageImageDestination) PutManifest(manifest []byte) error {
	s.Manifest = make([]byte, len(manifest))
	copy(s.Manifest, manifest)
	return nil
}

// SupportsSignatures returns an error if we can't expect GetSignatures() to
// return data that was previously supplied to PutSignatures().
func (s *storageImageDestination) SupportsSignatures() error {
	return nil
}

// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest
// should actually be uploaded to the image destination, true otherwise.
func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
	return false
}

func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
	sizes := []int{}
	sigblob := []byte{}
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		newblob := make([]byte, len(sigblob)+len(sig))
		copy(newblob, sigblob)
		copy(newblob[len(sigblob):], sig)
		sigblob = newblob
	}
	s.Signatures = sigblob
	s.SignatureSizes = sizes
	return nil
}

func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
	rc, n, _, err = s.getBlobAndLayerID(info)
	return rc, n, err
}

func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
	err = info.Digest.Validate()
	if err != nil {
		return nil, -1, "", err
	}
	if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
		b, err := s.imageRef.transport.store.GetImageBigData(s.ID, info.Digest.String())
		if err != nil {
			return nil, -1, "", err
		}
		r := bytes.NewReader(b)
		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
		return ioutil.NopCloser(r), int64(r.Len()), "", nil
	}
	// If the blob was "put" more than once, we have multiple layer IDs
	// which should all produce the same diff. For the sake of tests that
	// want to make sure we created different layers each time the blob was
	// "put", though, cycle through the layers.
	layerList := s.Layers[info.Digest]
	position, ok := s.LayerPosition[info.Digest]
	if !ok {
		position = 0
	}
	s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
	logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
	rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
	return rc, n, layerList[position], err
}

func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
	layer, err := store.GetLayer(layerID)
	if err != nil {
		return nil, -1, err
	}
	layerMeta := storageLayerMetadata{
		CompressedSize: -1,
	}
	if layer.Metadata != "" {
		if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
			return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
		}
	}
	if layerMeta.CompressedSize <= 0 {
		n = -1
	} else {
		n = layerMeta.CompressedSize
	}
	diff, err := store.Diff("", layer.ID)
	if err != nil {
		return nil, -1, err
	}
	return diff, n, nil
}

func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
	manifestBlob, err = s.imageRef.transport.store.GetImageBigData(s.ID, "manifest")
	return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
}

func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
	return nil, "", ErrNoManifestLists
}

func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
	var offset int
	signature, err := s.imageRef.transport.store.GetImageBigData(s.ID, "signatures")
	if err != nil {
		return nil, err
	}
	sigslice := [][]byte{}
	for _, length := range s.SignatureSizes {
		sigslice = append(sigslice, signature[offset:offset+length])
		offset += length
	}
	if offset != len(signature) {
return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
|
||||
}
	return sigslice, nil
}

func (s *storageImageSource) getSize() (int64, error) {
	var sum int64
	names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
	if err != nil {
		return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err)
	}
	for _, name := range names {
		bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
		if err != nil {
			return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err)
		}
		sum += bigSize
	}
	for _, sigSize := range s.SignatureSizes {
		sum += int64(sigSize)
	}
	for _, layerList := range s.Layers {
		for _, layerID := range layerList {
			layer, err := s.imageRef.transport.store.GetLayer(layerID)
			if err != nil {
				return -1, err
			}
			layerMeta := storageLayerMetadata{
				Size: -1,
			}
			if layer.Metadata != "" {
				if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
					return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
				}
			}
			if layerMeta.Size < 0 {
				return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
			}
			sum += layerMeta.Size
		}
	}
	return sum, nil
}

func (s *storageImage) Size() (int64, error) {
	return s.size, nil
}

// newImage creates an image that also knows its size
func newImage(s storageReference) (types.Image, error) {
	src, err := newImageSource(s)
	if err != nil {
		return nil, err
	}
	img, err := image.FromSource(src)
	if err != nil {
		return nil, err
	}
	size, err := src.getSize()
	if err != nil {
		return nil, err
	}
	return &storageImage{Image: img, size: size}, nil
}
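
One packing detail worth calling out from the file above: PutSignatures stores every signature in a single concatenated blob plus a list of lengths (SignatureSizes), and GetSignatures re-slices the blob using those lengths. A self-contained sketch of that round trip, using append in place of the make/copy idiom above (the signature bytes are invented):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	signatures := [][]byte{[]byte("sig-one"), []byte("sig-two, longer")}

	// Pack: concatenate the signatures and remember each one's length,
	// the same scheme PutSignatures uses for the "signatures" big-data item.
	sizes := []int{}
	sigblob := []byte{}
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		sigblob = append(sigblob, sig...)
	}

	// Unpack: walk the blob using the recorded sizes, as GetSignatures does.
	offset := 0
	unpacked := [][]byte{}
	for _, length := range sizes {
		unpacked = append(unpacked, sigblob[offset:offset+length])
		offset += length
	}

	fmt.Println(bytes.Equal(unpacked[0], signatures[0]), bytes.Equal(unpacked[1], signatures[1])) // true true
}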

127 vendor/github.com/containers/image/storage/storage_reference.go (generated, vendored, new file)

@@ -0,0 +1,127 @@
package storage

import (
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/types"
)

// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
// value hex-encoded into a 64-character string, and a reference to a Store
// where an image is, or would be, kept.
type storageReference struct {
	transport storageTransport
	reference string
	id        string
	name      reference.Named
}

func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
	// We take a copy of the transport, which contains a pointer to the
	// store that it used for resolving this reference, so that the
	// transport that we'll return from Transport() won't be affected by
	// further calls to the original transport's SetStore() method.
	return &storageReference{
		transport: transport,
		reference: reference,
		id:        id,
		name:      name,
	}
}

// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID.
func (s *storageReference) resolveID() string {
	if s.id == "" {
		image, err := s.transport.store.GetImage(s.reference)
		if image != nil && err == nil {
			s.id = image.ID
		}
	}
	return s.id
}

// Return a Transport object that defaults to using the same store that we used
// to build this reference object.
func (s storageReference) Transport() types.ImageTransport {
	return &storageTransport{
		store: s.transport.store,
	}
}

// Return a name with a tag, if we have a name to base them on.
func (s storageReference) DockerReference() reference.Named {
	return s.name
}

// Return a name with a tag, prefixed with the graph root and driver name, to
// disambiguate between images which may be present in multiple stores and
// share only their names.
func (s storageReference) StringWithinTransport() string {
	storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]"
	if s.name == nil {
		return storeSpec + "@" + s.id
	}
	if s.id == "" {
		return storeSpec + s.reference
	}
	return storeSpec + s.reference + "@" + s.id
}

func (s storageReference) PolicyConfigurationIdentity() string {
	return s.StringWithinTransport()
}

// Also accept policy that's tied to the combination of the graph root and
// driver name, to apply to all images stored in the Store, and to just the
// graph root, in case we're using multiple drivers in the same directory for
// some reason.
func (s storageReference) PolicyConfigurationNamespaces() []string {
	storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]"
	driverlessStoreSpec := "[" + s.transport.store.GetGraphRoot() + "]"
	namespaces := []string{}
	if s.name != nil {
		if s.id != "" {
			// The reference without the ID is also a valid namespace.
			namespaces = append(namespaces, storeSpec+s.reference)
		}
		components := strings.Split(s.name.FullName(), "/")
		for len(components) > 0 {
			namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
			components = components[:len(components)-1]
		}
	}
	namespaces = append(namespaces, storeSpec)
	namespaces = append(namespaces, driverlessStoreSpec)
	return namespaces
}

func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
	return newImage(s)
}

func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
	id := s.resolveID()
	if id == "" {
		logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
		return ErrNoSuchImage
	}
	layers, err := s.transport.store.DeleteImage(id, true)
	if err == nil {
		logrus.Debugf("deleted image %q", id)
		for _, layer := range layers {
			logrus.Debugf("deleted layer %q", layer)
		}
	}
	return err
}

func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
	return newImageSource(s)
}

func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(s)
}
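
To make the namespace ordering above concrete: PolicyConfigurationNamespaces walks from the most specific scope to the least specific. A rough sketch that reproduces the walk for fixed inputs (the store spec, image name, and tag are all made up, and the tagged-reference entry is shown as it would appear when the reference carries an ID):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical store spec and fully-qualified image name.
	storeSpec := "[overlay@/var/lib/storage]"
	fullName := "docker.io/library/busybox" // what name.FullName() would return
	tagged := fullName + ":latest"

	// When the reference carries an ID, the tagged reference without the
	// ID comes first; then the name with path components trimmed from the
	// right; then the store itself, with and without the driver name.
	namespaces := []string{storeSpec + tagged}
	components := strings.Split(fullName, "/")
	for len(components) > 0 {
		namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	namespaces = append(namespaces, storeSpec, "[/var/lib/storage]")
	for _, ns := range namespaces {
		fmt.Println(ns)
	}
}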

285 vendor/github.com/containers/image/storage/storage_transport.go (generated, vendored, new file)

@@ -0,0 +1,285 @@
package storage

import (
	"errors"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/types"
	"github.com/containers/storage/storage"
	"github.com/docker/distribution/digest"
	ddigest "github.com/docker/distribution/digest"
)

var (
	// Transport is an ImageTransport that uses either a default
	// storage.Store or one that it's explicitly told to use.
	Transport StoreTransport = &storageTransport{}
	// ErrInvalidReference is returned when ParseReference() is passed an
	// empty reference.
	ErrInvalidReference = errors.New("invalid reference")
	// ErrPathNotAbsolute is returned when a graph root is not an absolute
	// path name.
	ErrPathNotAbsolute = errors.New("path name is not absolute")
	idRegexp           = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
)

// StoreTransport is an ImageTransport that uses a storage.Store to parse
// references, either its own default or one that it's told to use.
type StoreTransport interface {
	types.ImageTransport
	// SetStore sets the default store for this transport.
	SetStore(storage.Store)
	// GetImage retrieves the image from the transport's store that's named
	// by the reference.
	GetImage(types.ImageReference) (*storage.Image, error)
	// GetStoreImage retrieves the image from a specified store that's named
	// by the reference.
	GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
	// ParseStoreReference parses a reference, overriding any store
	// specification that it may contain.
	ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
}

type storageTransport struct {
	store storage.Store
}

func (s *storageTransport) Name() string {
	// Still haven't really settled on a name.
	return "containers-storage"
}

// SetStore sets the Store object which the Transport will use for parsing
// references when information about a Store is not directly specified as part
// of the reference. If one is not set, the library will attempt to initialize
// one with default settings when a reference needs to be parsed. Calling
// SetStore does not affect previously parsed references.
func (s *storageTransport) SetStore(store storage.Store) {
	s.store = store
}

// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
	var name reference.Named
	var sum digest.Digest
	var err error
	if ref == "" {
		return nil, ErrInvalidReference
	}
	if ref[0] == '[' {
		// Ignore the store specifier.
		closeIndex := strings.IndexRune(ref, ']')
		if closeIndex < 1 {
			return nil, ErrInvalidReference
		}
		ref = ref[closeIndex+1:]
	}
	refInfo := strings.SplitN(ref, "@", 2)
	if len(refInfo) == 1 {
		// A name.
		name, err = reference.ParseNamed(refInfo[0])
		if err != nil {
			return nil, err
		}
	} else if len(refInfo) == 2 {
		// An ID, possibly preceded by a name.
		if refInfo[0] != "" {
			name, err = reference.ParseNamed(refInfo[0])
			if err != nil {
				return nil, err
			}
		}
		sum, err = digest.ParseDigest("sha256:" + refInfo[1])
		if err != nil {
			return nil, err
		}
	} else { // Coverage: len(refInfo) is always 1 or 2
		// Anything else: store specified in a form we don't
		// recognize.
		return nil, ErrInvalidReference
	}
	storeSpec := "[" + store.GetGraphDriverName() + "@" + store.GetGraphRoot() + "]"
	id := ""
	if sum.Validate() == nil {
		id = sum.Hex()
	}
	refname := ""
	if name != nil {
		name = reference.WithDefaultTag(name)
		refname = verboseName(name)
	}
	if refname == "" {
		logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
	} else if id == "" {
		logrus.Debugf("parsed reference into %q", storeSpec+refname)
	} else {
		logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
	}
	return newReference(storageTransport{store: store}, refname, id, name), nil
}

func (s *storageTransport) GetStore() (storage.Store, error) {
	// Return the transport's previously-set store. If we don't have one
	// of those, initialize one now.
	if s.store == nil {
		store, err := storage.GetStore(storage.DefaultStoreOptions)
		if err != nil {
			return nil, err
		}
		s.store = store
	}
	return s.store, nil
}

// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in
// a reference object. If the _graphroot_ is a location other than the default,
// it needs to have been previously opened using storage.GetStore(), so that it
// can figure out which run root goes with the graph root.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
	store, err := s.GetStore()
	if err != nil {
		return nil, err
	}
	// Check if there's a store location prefix. If there is, then it
	// needs to match a store that was previously initialized using
	// storage.GetStore(), or be enough to let the storage library fill out
	// the rest using knowledge that it has from elsewhere.
	if reference[0] == '[' {
		closeIndex := strings.IndexRune(reference, ']')
		if closeIndex < 1 {
			return nil, ErrInvalidReference
		}
		storeSpec := reference[1:closeIndex]
		reference = reference[closeIndex+1:]
		storeInfo := strings.SplitN(storeSpec, "@", 2)
		if len(storeInfo) == 1 && storeInfo[0] != "" {
			// One component: the graph root.
			if !filepath.IsAbs(storeInfo[0]) {
				return nil, ErrPathNotAbsolute
			}
			store2, err := storage.GetStore(storage.StoreOptions{
				GraphRoot: storeInfo[0],
			})
			if err != nil {
				return nil, err
			}
			store = store2
		} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
			// Two components: the driver type and the graph root.
			if !filepath.IsAbs(storeInfo[1]) {
				return nil, ErrPathNotAbsolute
			}
			store2, err := storage.GetStore(storage.StoreOptions{
				GraphDriverName: storeInfo[0],
				GraphRoot:       storeInfo[1],
			})
			if err != nil {
				return nil, err
			}
			store = store2
		} else {
			// Anything else: store specified in a form we don't
			// recognize.
			return nil, ErrInvalidReference
		}
	}
	return s.ParseStoreReference(store, reference)
}

func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
	dref := ref.DockerReference()
	if dref == nil {
		if sref, ok := ref.(*storageReference); ok {
			if sref.id != "" {
				if img, err := store.GetImage(sref.id); err == nil {
					return img, nil
				}
			}
		}
		return nil, ErrInvalidReference
	}
	return store.GetImage(verboseName(dref))
}

func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
	store, err := s.GetStore()
	if err != nil {
		return nil, err
	}
	return s.GetStoreImage(store, ref)
}

func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
	// Check that there's a store location prefix. Values we're passed are
	// expected to come from PolicyConfigurationIdentity or
	// PolicyConfigurationNamespaces, so if there's no store location,
	// something's wrong.
	if scope[0] != '[' {
		return ErrInvalidReference
	}
	// Parse the store location prefix.
	closeIndex := strings.IndexRune(scope, ']')
	if closeIndex < 1 {
		return ErrInvalidReference
	}
	storeSpec := scope[1:closeIndex]
	scope = scope[closeIndex+1:]
	storeInfo := strings.SplitN(storeSpec, "@", 2)
	if len(storeInfo) == 1 && storeInfo[0] != "" {
		// One component: the graph root.
		if !filepath.IsAbs(storeInfo[0]) {
			return ErrPathNotAbsolute
		}
	} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
		// Two components: the driver type and the graph root.
		if !filepath.IsAbs(storeInfo[1]) {
			return ErrPathNotAbsolute
		}
	} else {
		// Anything else: store specified in a form we don't
		// recognize.
		return ErrInvalidReference
	}
	// That might be all of it, and that's okay.
	if scope == "" {
		return nil
	}
	// But if there is anything left, it has to be a name, with or without
	// a tag, with or without an ID, since we don't return namespace values
	// that are just bare IDs.
	scopeInfo := strings.SplitN(scope, "@", 2)
	if len(scopeInfo) == 1 && scopeInfo[0] != "" {
		_, err := reference.ParseNamed(scopeInfo[0])
		if err != nil {
			return err
		}
	} else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" {
		_, err := reference.ParseNamed(scopeInfo[0])
		if err != nil {
			return err
		}
		_, err = ddigest.ParseDigest("sha256:" + scopeInfo[1])
		if err != nil {
			return err
		}
	} else {
		return ErrInvalidReference
	}
	return nil
}

func verboseName(name reference.Named) string {
	name = reference.WithDefaultTag(name)
	tag := ""
	if tagged, ok := name.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}
	return name.FullName() + ":" + tag
}
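
Taken together, the three files give callers a local-storage transport. A minimal consumer sketch under assumptions: the store specifier and image name are hypothetical, a nil SystemContext is passed, and the named graph root must belong to a store that containers/storage can actually open.

package main

import (
	"fmt"

	"github.com/containers/image/storage"
)

func main() {
	// Hypothetical reference: overlay driver, an absolute graph root, a tagged name.
	ref, err := storage.Transport.ParseReference("[overlay@/var/lib/storage]busybox:latest")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	// NewImage resolves the name in the store and fails with ErrNoSuchImage
	// if nothing matches; Size comes from the storageImage wrapper above.
	img, err := ref.NewImage(nil)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	size, err := img.Size()
	if err != nil {
		fmt.Println("size failed:", err)
		return
	}
	fmt.Printf("%s totals %d bytes\n", ref.StringWithinTransport(), size)
}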