Bump image, storage, and image-spec
Bump containers/image (pulling in its new dependency on ostree-go), containers/storage, and an updated image-spec. This pulls in the OCI v1.0 specifications and code that allows us to support 1.0 images.

Signed-off-by: Dan Walsh <dwalsh@redhat.com>
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
This commit is contained in:
parent 5138691c3b
commit d76645680f

117 changed files with 3965 additions and 991 deletions
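The image-spec bump is what makes OCI v1.0 manifests representable in code. As a rough, hypothetical sketch (not taken from this commit; the digest values and sizes are placeholders), building a minimal OCI v1.0 manifest with the vendored types might look like this:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	digest "github.com/opencontainers/go-digest"
    	specs "github.com/opencontainers/image-spec/specs-go"
    	v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	// A minimal manifest: one config blob and one gzipped layer blob,
    	// referred to by (placeholder) digest and size.
    	m := v1.Manifest{
    		Versioned: specs.Versioned{SchemaVersion: 2},
    		Config: v1.Descriptor{
    			MediaType: v1.MediaTypeImageConfig,
    			Digest:    digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000"),
    			Size:      1024,
    		},
    		Layers: []v1.Descriptor{{
    			MediaType: v1.MediaTypeImageLayerGzip,
    			Digest:    digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111"),
    			Size:      4096,
    		}},
    	}
    	raw, err := json.MarshalIndent(m, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(raw))
    }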
475 changes: vendor/github.com/containers/storage/layers.go (generated, vendored)
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"compress/gzip"
 	"encoding/json"
-	"errors"
 	"io"
 	"io/ioutil"
 	"os"
@@ -16,6 +15,8 @@ import (
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/truncindex"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 	"github.com/vbatts/tar-split/tar/asm"
 	"github.com/vbatts/tar-split/tar/storage"
 )
@@ -66,6 +67,38 @@ type Layer struct {
 	// mounted at the mount point.
 	MountCount int `json:"-"`
 
+	// Created is the datestamp for when this layer was created. Older
+	// versions of the library did not track this information, so callers
+	// will likely want to use the IsZero() method to verify that a value
+	// is set before using it.
+	Created time.Time `json:"created,omitempty"`
+
+	// CompressedDigest is the digest of the blob that was last passed to
+	// ApplyDiff() or Put(), as it was presented to us.
+	CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
+
+	// CompressedSize is the length of the blob that was last passed to
+	// ApplyDiff() or Put(), as it was presented to us. If
+	// CompressedDigest is not set, this should be treated as if it were an
+	// uninitialized value.
+	CompressedSize int64 `json:"compressed-size,omitempty"`
+
+	// UncompressedDigest is the digest of the blob that was last passed to
+	// ApplyDiff() or Put(), after we decompressed it. Often referred to
+	// as a DiffID.
+	UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
+
+	// UncompressedSize is the length of the blob that was last passed to
+	// ApplyDiff() or Put(), after we decompressed it. If
+	// UncompressedDigest is not set, this should be treated as if it were
+	// an uninitialized value.
+	UncompressedSize int64 `json:"diff-size,omitempty"`
+
+	// CompressionType is the type of compression which we detected on the blob
+	// that was last passed to ApplyDiff() or Put().
+	CompressionType archive.Compression `json:"compression,omitempty"`
+
 	// Flags is arbitrary data about the layer.
 	Flags map[string]interface{} `json:"flags,omitempty"`
 }
 
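The new Layer fields above are all optional in JSON, so consumers have to treat zero values as "unknown" rather than as facts. A minimal sketch of how they are meant to be read (assuming this file's package and imports; describeLayer is a hypothetical helper, not part of this diff):

    // describeLayer reads the new fields defensively: Created via IsZero(),
    // and the uncompressed size only when its matching digest was recorded.
    func describeLayer(layer Layer) string {
    	created := "unknown"
    	if !layer.Created.IsZero() {
    		created = layer.Created.Format(time.RFC3339)
    	}
    	size := int64(-1) // -1 means "not known"
    	if layer.UncompressedDigest != "" {
    		size = layer.UncompressedSize
    	}
    	return fmt.Sprintf("layer %s: created %s, uncompressed size %d", layer.ID, created, size)
    }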
@@ -75,12 +108,74 @@ type layerMountPoint struct {
 	MountCount int `json:"count"`
 }
 
+// DiffOptions override the default behavior of Diff() methods.
+type DiffOptions struct {
+	// Compression, if set overrides the default compressor when generating a diff.
+	Compression *archive.Compression
+}
+
+// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type ROLayerStore interface {
+	ROFileBasedStore
+	ROMetadataStore
+
+	// Exists checks if a layer with the specified name or ID is known.
+	Exists(id string) bool
+
+	// Get retrieves information about a layer given an ID or name.
+	Get(id string) (*Layer, error)
+
+	// Status returns an slice of key-value pairs, suitable for human consumption,
+	// relaying whatever status information the underlying driver can share.
+	Status() ([][2]string, error)
+
+	// Changes returns a slice of Change structures, which contain a pathname
+	// (Path) and a description of what sort of change (Kind) was made by the
+	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+	// specified layer. By default, the layer's parent is used as a reference.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// Diff produces a tarstream which can be applied to a layer with the contents
+	// of the first layer to produce a layer with the contents of the second layer.
+	// By default, the parent of the second layer is used as the first
+	// layer, so it need not be specified. Options can be used to override
+	// default behavior, but are also not required.
+	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+	// DiffSize produces an estimate of the length of the tarstream which would be
+	// produced by Diff.
+	DiffSize(from, to string) (int64, error)
+
+	// Size produces a cached value for the uncompressed size of the layer,
+	// if one is known, or -1 if it is not known. If the layer can not be
+	// found, it returns an error.
+	Size(name string) (int64, error)
+
+	// Lookup attempts to translate a name to an ID. Most methods do this
+	// implicitly.
+	Lookup(name string) (string, error)
+
+	// LayersByCompressedDigest returns a slice of the layers with the
+	// specified compressed digest value recorded for them.
+	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayersByUncompressedDigest returns a slice of the layers with the
+	// specified uncompressed digest value recorded for them.
+	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// Layers returns a slice of the known layers.
+	Layers() ([]Layer, error)
+}
+
 // LayerStore wraps a graph driver, adding the ability to refer to layers by
 // name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
 type LayerStore interface {
-	FileBasedStore
-	MetadataStore
+	ROLayerStore
+	RWFileBasedStore
+	RWMetadataStore
 	FlaggableStore
 
 	// Create creates a new layer, optionally giving it a specified ID rather than
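The DiffOptions knob defined above is the only per-call override: leave it nil to get the compression recorded for the layer, or point Compression at an explicit value. A minimal sketch (lstore and layerID are assumed identifiers; archive is the vendored containers/storage/pkg/archive package):

    // compressedDiff asks for a diff of layerID against its parent (the empty
    // "from" means "use the layer's parent"), forcing gzip output regardless
    // of how the layer's diff was originally compressed.
    func compressedDiff(lstore ROLayerStore, layerID string) (io.ReadCloser, error) {
    	compression := archive.Gzip
    	return lstore.Diff("", layerID, &DiffOptions{Compression: &compression})
    }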
@@ -98,20 +193,10 @@ type LayerStore interface {
 	// Put combines the functions of CreateWithFlags and ApplyDiff.
 	Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
 
-	// Exists checks if a layer with the specified name or ID is known.
-	Exists(id string) bool
-
-	// Get retrieves information about a layer given an ID or name.
-	Get(id string) (*Layer, error)
-
 	// SetNames replaces the list of names associated with a layer with the
 	// supplied values.
 	SetNames(id string, names []string) error
 
-	// Status returns an slice of key-value pairs, suitable for human consumption,
-	// relaying whatever status information the underlying driver can share.
-	Status() ([][2]string, error)
-
 	// Delete deletes a layer with the specified name or ID.
 	Delete(id string) error
 
@@ -126,48 +211,31 @@ type LayerStore interface {
 	// Unmount unmounts a layer when it is no longer in use.
 	Unmount(id string) error
 
-	// Changes returns a slice of Change structures, which contain a pathname
-	// (Path) and a description of what sort of change (Kind) was made by the
-	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
-	// specified layer. By default, the layer's parent is used as a reference.
-	Changes(from, to string) ([]archive.Change, error)
-
-	// Diff produces a tarstream which can be applied to a layer with the contents
-	// of the first layer to produce a layer with the contents of the second layer.
-	// By default, the parent of the second layer is used as the first
-	// layer, so it need not be specified.
-	Diff(from, to string) (io.ReadCloser, error)
-
-	// DiffSize produces an estimate of the length of the tarstream which would be
-	// produced by Diff.
-	DiffSize(from, to string) (int64, error)
-
 	// ApplyDiff reads a tarstream which was created by a previous call to Diff and
 	// applies its changes to a specified layer.
 	ApplyDiff(to string, diff archive.Reader) (int64, error)
-
-	// Lookup attempts to translate a name to an ID. Most methods do this
-	// implicitly.
-	Lookup(name string) (string, error)
-
-	// Layers returns a slice of the known layers.
-	Layers() ([]Layer, error)
 }
 
 type layerStore struct {
-	lockfile Locker
-	rundir   string
-	driver   drivers.Driver
-	layerdir string
-	layers   []Layer
-	idindex  *truncindex.TruncIndex
-	byid     map[string]*Layer
-	byname   map[string]*Layer
-	bymount  map[string]*Layer
+	lockfile          Locker
+	rundir            string
+	driver            drivers.Driver
+	layerdir          string
+	layers            []*Layer
+	idindex           *truncindex.TruncIndex
+	byid              map[string]*Layer
+	byname            map[string]*Layer
+	bymount           map[string]*Layer
+	bycompressedsum   map[digest.Digest][]string
+	byuncompressedsum map[digest.Digest][]string
 }
 
 func (r *layerStore) Layers() ([]Layer, error) {
-	return r.layers, nil
+	layers := make([]Layer, len(r.layers))
+	for i := range r.layers {
+		layers[i] = *(r.layers[i])
+	}
+	return layers, nil
 }
 
 func (r *layerStore) mountspath() string {
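One plausible reading of the []Layer-to-[]*Layer switch above (the commit itself does not spell it out): pointers taken into a []Layer slice, as the old Put() code further down in this diff did with &r.layers[len(r.layers)-1], are silently invalidated whenever append reallocates the backing array. A standalone demonstration:

    package main

    import "fmt"

    type Layer struct{ ID string }

    func main() {
    	layers := make([]Layer, 0, 1)
    	layers = append(layers, Layer{ID: "a"})
    	p := &layers[0] // pointer into the current backing array

    	// Growing past capacity reallocates; p still points at the old array.
    	layers = append(layers, Layer{ID: "b"})
    	layers[0].ID = "changed"

    	fmt.Println(p.ID)         // prints "a": the stale copy
    	fmt.Println(layers[0].ID) // prints "changed"
    }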
@@ -179,36 +247,41 @@ func (r *layerStore) layerspath() string {
 }
 
 func (r *layerStore) Load() error {
-	needSave := false
+	shouldSave := false
 	rpath := r.layerspath()
 	data, err := ioutil.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
-	layers := []Layer{}
+	layers := []*Layer{}
 	idlist := []string{}
 	ids := make(map[string]*Layer)
 	names := make(map[string]*Layer)
 	mounts := make(map[string]*Layer)
-	parents := make(map[string][]*Layer)
+	compressedsums := make(map[digest.Digest][]string)
+	uncompressedsums := make(map[digest.Digest][]string)
 	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
 		for n, layer := range layers {
-			ids[layer.ID] = &layers[n]
+			ids[layer.ID] = layers[n]
 			idlist = append(idlist, layer.ID)
 			for _, name := range layer.Names {
 				if conflict, ok := names[name]; ok {
 					r.removeName(conflict, name)
-					needSave = true
+					shouldSave = true
 				}
-				names[name] = &layers[n]
+				names[name] = layers[n]
 			}
-			if pslice, ok := parents[layer.Parent]; ok {
-				parents[layer.Parent] = append(pslice, &layers[n])
-			} else {
-				parents[layer.Parent] = []*Layer{&layers[n]}
+			if layer.CompressedDigest != "" {
+				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+			}
+			if layer.UncompressedDigest != "" {
+				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
 			}
 		}
 	}
+	if shouldSave && !r.IsReadWrite() {
+		return errors.New("layer store assigns the same name to multiple layers")
+	}
 	mpath := r.mountspath()
 	data, err = ioutil.ReadFile(mpath)
 	if err != nil && !os.IsNotExist(err) {
@@ -231,29 +304,35 @@ func (r *layerStore) Load() error {
 	r.byid = ids
 	r.byname = names
 	r.bymount = mounts
+	r.bycompressedsum = compressedsums
+	r.byuncompressedsum = uncompressedsums
 	err = nil
-	// Last step: try to remove anything that a previous user of this
-	// storage area marked for deletion but didn't manage to actually
-	// delete.
-	for _, layer := range r.layers {
-		if cleanup, ok := layer.Flags[incompleteFlag]; ok {
-			if b, ok := cleanup.(bool); ok && b {
-				err = r.Delete(layer.ID)
-				if err != nil {
-					break
+	// Last step: if we're writable, try to remove anything that a previous
+	// user of this storage area marked for deletion but didn't manage to
+	// actually delete.
+	if r.IsReadWrite() {
+		for _, layer := range r.layers {
+			if cleanup, ok := layer.Flags[incompleteFlag]; ok {
+				if b, ok := cleanup.(bool); ok && b {
+					err = r.Delete(layer.ID)
+					if err != nil {
+						break
+					}
+					shouldSave = true
 				}
-				needSave = true
 			}
 		}
-	}
-	if needSave {
-		r.Touch()
-		return r.Save()
+		if shouldSave {
+			return r.Save()
+		}
 	}
 	return err
 }
 
 func (r *layerStore) Save() error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+	}
 	rpath := r.layerspath()
 	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
 		return err
@@ -283,6 +362,7 @@ func (r *layerStore) Save() error {
 	if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
 		return err
 	}
+	defer r.Touch()
 	return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
 }
 
@@ -314,6 +394,28 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer
 	return &rlstore, nil
 }
 
+func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
+	lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
+	if err != nil {
+		return nil, err
+	}
+	lockfile.Lock()
+	defer lockfile.Unlock()
+	rlstore := layerStore{
+		lockfile: lockfile,
+		driver:   driver,
+		rundir:   rundir,
+		layerdir: layerdir,
+		byid:     make(map[string]*Layer),
+		bymount:  make(map[string]*Layer),
+		byname:   make(map[string]*Layer),
+	}
+	if err := rlstore.Load(); err != nil {
+		return nil, err
+	}
+	return &rlstore, nil
+}
+
 func (r *layerStore) lookup(id string) (*Layer, bool) {
 	if layer, ok := r.byid[id]; ok {
 		return layer, ok
@@ -326,7 +428,24 @@ func (r *layerStore) lookup(id string) (*Layer, bool) {
 	return nil, false
 }
 
+func (r *layerStore) Size(name string) (int64, error) {
+	layer, ok := r.lookup(name)
+	if !ok {
+		return -1, ErrLayerUnknown
+	}
+	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
+	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
+	// created by a version of this library that didn't keep track of digest and size information).
+	if layer.UncompressedDigest != "" {
+		return layer.UncompressedSize, nil
+	}
+	return -1, nil
+}
+
 func (r *layerStore) ClearFlag(id string, flag string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
+	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		return ErrLayerUnknown
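Size() above returns -1 both when the layer predates size tracking and when the digest was never recorded, and only errors when the layer cannot be found, so callers need to branch on the sentinel. A minimal sketch (lstore and layerID are assumed identifiers):

    // layerSize prefers the cached value and falls back to computing the
    // size from the diff against the parent when no size was recorded.
    func layerSize(lstore ROLayerStore, layerID string) (int64, error) {
    	size, err := lstore.Size(layerID)
    	if err != nil {
    		return -1, err // the layer could not be found
    	}
    	if size == -1 {
    		// No recorded size; DiffSize("" , id) measures against the parent.
    		return lstore.DiffSize("", layerID)
    	}
    	return size, nil
    }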
@@ -336,6 +455,9 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
 }
 
 func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
+	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		return ErrLayerUnknown
@@ -349,6 +471,9 @@ func (r *layerStore) Status() ([][2]string, error) {
 }
 
 func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) {
+	if !r.IsReadWrite() {
+		return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
+	}
 	size = -1
 	if err := os.MkdirAll(r.rundir, 0700); err != nil {
 		return nil, -1, err
@@ -383,15 +508,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
 		err = r.driver.Create(id, parent, mountLabel, options)
 	}
 	if err == nil {
-		newLayer := Layer{
+		layer = &Layer{
 			ID:         id,
 			Parent:     parent,
 			Names:      names,
 			MountLabel: mountLabel,
+			Created:    time.Now().UTC(),
 			Flags:      make(map[string]interface{}),
 		}
-		r.layers = append(r.layers, newLayer)
-		layer = &r.layers[len(r.layers)-1]
+		r.layers = append(r.layers, layer)
 		r.idindex.Add(id)
 		r.byid[id] = layer
 		for _, name := range names {
@@ -441,6 +566,9 @@ func (r *layerStore) Create(id, parent string, names []string, mountLabel string
 }
 
 func (r *layerStore) Mount(id, mountLabel string) (string, error) {
+	if !r.IsReadWrite() {
+		return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		return "", ErrLayerUnknown
@@ -466,6 +594,9 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
 }
 
 func (r *layerStore) Unmount(id string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -495,6 +626,9 @@ func (r *layerStore) removeName(layer *Layer, name string) {
 }
 
 func (r *layerStore) SetNames(id string, names []string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
+	}
 	if layer, ok := r.lookup(id); ok {
 		for _, name := range layer.Names {
 			delete(r.byname, name)
@@ -519,6 +653,9 @@ func (r *layerStore) Metadata(id string) (string, error) {
 }
 
 func (r *layerStore) SetMetadata(id, metadata string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+	}
 	if layer, ok := r.lookup(id); ok {
 		layer.Metadata = metadata
 		return r.Save()
@@ -531,6 +668,9 @@ func (r *layerStore) tspath(id string) string {
 }
 
 func (r *layerStore) Delete(id string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		return ErrLayerUnknown
@@ -549,13 +689,21 @@ func (r *layerStore) Delete(id string) error {
 	if layer.MountPoint != "" {
 		delete(r.bymount, layer.MountPoint)
 	}
-	newLayers := []Layer{}
-	for _, candidate := range r.layers {
-		if candidate.ID != id {
-			newLayers = append(newLayers, candidate)
+	toDeleteIndex := -1
+	for i, candidate := range r.layers {
+		if candidate.ID == id {
+			toDeleteIndex = i
+			break
+		}
+	}
+	if toDeleteIndex != -1 {
+		// delete the layer at toDeleteIndex
+		if toDeleteIndex == len(r.layers)-1 {
+			r.layers = r.layers[:len(r.layers)-1]
+		} else {
+			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
 		}
 	}
-	r.layers = newLayers
 	if err = r.Save(); err != nil {
 		return err
 	}
@@ -583,6 +731,9 @@ func (r *layerStore) Get(id string) (*Layer, error) {
 }
 
 func (r *layerStore) Wipe() error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+	}
 	ids := []string{}
 	for id := range r.byid {
 		ids = append(ids, id)
@@ -657,48 +808,64 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 	}, nil
 }
 
-func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
+func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
 	var metadata storage.Unpacker
 
 	from, to, toLayer, err := r.findParentAndLayer(from, to)
 	if err != nil {
 		return nil, ErrLayerUnknown
 	}
-	compression := archive.Uncompressed
-	if cflag, ok := toLayer.Flags[compressionFlag]; ok {
-		if ctype, ok := cflag.(float64); ok {
-			compression = archive.Compression(ctype)
-		} else if ctype, ok := cflag.(archive.Compression); ok {
-			compression = archive.Compression(ctype)
-		}
+	// Default to applying the type of compression that we noted was used
+	// for the layerdiff when it was applied.
+	compression := toLayer.CompressionType
+	// If a particular compression type (or no compression) was selected,
+	// use that instead.
+	if options != nil && options.Compression != nil {
+		compression = *options.Compression
+	}
+	maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
+		// Depending on whether or not compression is desired, return either the
+		// passed-in ReadCloser, or a new one that provides its readers with a
+		// compressed version of the data that the original would have provided
+		// to its readers.
+		if compression == archive.Uncompressed {
+			return rc, nil
+		}
+		preader, pwriter := io.Pipe()
+		compressor, err := archive.CompressStream(pwriter, compression)
+		if err != nil {
+			rc.Close()
+			pwriter.Close()
+			preader.Close()
+			return nil, err
+		}
+		go func() {
+			defer pwriter.Close()
+			defer compressor.Close()
+			defer rc.Close()
+			io.Copy(compressor, rc)
+		}()
+		return preader, nil
 	}
 
 	if from != toLayer.Parent {
 		diff, err := r.driver.Diff(to, from)
-		if err == nil && (compression != archive.Uncompressed) {
-			preader, pwriter := io.Pipe()
-			compressor, err := archive.CompressStream(pwriter, compression)
-			if err != nil {
-				diff.Close()
-				pwriter.Close()
-				return nil, err
-			}
-			go func() {
-				io.Copy(compressor, diff)
-				diff.Close()
-				compressor.Close()
-				pwriter.Close()
-			}()
-			diff = preader
+		if err != nil {
+			return nil, err
 		}
-		return diff, err
+		return maybeCompressReadCloser(diff)
 	}
 
 	tsfile, err := os.Open(r.tspath(to))
 	if err != nil {
-		if os.IsNotExist(err) {
-			return r.driver.Diff(to, from)
+		if !os.IsNotExist(err) {
+			return nil, err
 		}
-		return nil, err
+		diff, err := r.driver.Diff(to, from)
+		if err != nil {
+			return nil, err
+		}
+		return maybeCompressReadCloser(diff)
 	}
 	defer tsfile.Close()
 
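The maybeCompressReadCloser closure above is a pipe-and-goroutine pattern worth seeing in isolation. A standard-library-only sketch (gzip stands in for archive.CompressStream, and copy errors are dropped for brevity):

    package main

    import (
    	"compress/gzip"
    	"io"
    )

    // compressReadCloser serves a gzip-compressed view of rc through a pipe.
    // The deferred closes run in reverse order: rc first, then the compressor
    // (flushing its trailer into the pipe), then the pipe writer itself.
    func compressReadCloser(rc io.ReadCloser) io.ReadCloser {
    	preader, pwriter := io.Pipe()
    	go func() {
    		compressor := gzip.NewWriter(pwriter)
    		defer pwriter.Close()
    		defer compressor.Close()
    		defer rc.Close()
    		io.Copy(compressor, rc) // error handling elided in this sketch
    	}()
    	return preader
    }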
@@ -720,33 +887,16 @@ func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
 		return nil, err
 	}
 
-	var stream io.ReadCloser
-	if compression != archive.Uncompressed {
-		preader, pwriter := io.Pipe()
-		compressor, err := archive.CompressStream(pwriter, compression)
-		if err != nil {
-			fgetter.Close()
-			pwriter.Close()
-			preader.Close()
-			return nil, err
-		}
-		go func() {
-			asm.WriteOutputTarStream(fgetter, metadata, compressor)
-			compressor.Close()
-			pwriter.Close()
-		}()
-		stream = preader
-	} else {
-		stream = asm.NewOutputTarStream(fgetter, metadata)
-	}
-	return ioutils.NewReadCloserWrapper(stream, func() error {
-		err1 := stream.Close()
+	tarstream := asm.NewOutputTarStream(fgetter, metadata)
+	rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
+		err1 := tarstream.Close()
 		err2 := fgetter.Close()
 		if err2 == nil {
 			return err1
 		}
 		return err2
-	}), nil
+	})
+	return maybeCompressReadCloser(rc)
 }
 
 func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
@@ -758,6 +908,10 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
 }
 
 func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) {
+	if !r.IsReadWrite() {
+		return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
+	}
+
 	layer, ok := r.lookup(to)
 	if !ok {
 		return -1, ErrLayerUnknown
@@ -770,7 +924,9 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
 	}
 
 	compression := archive.DetectCompression(header[:n])
-	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
+	compressedDigest := digest.Canonical.Digester()
+	compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
+	defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
 
 	tsdata := bytes.Buffer{}
 	compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
@@ -778,15 +934,20 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
 		compressor = gzip.NewWriter(&tsdata)
 	}
 	metadata := storage.NewJSONPacker(compressor)
-	decompressed, err := archive.DecompressStream(defragmented)
+	uncompressed, err := archive.DecompressStream(defragmented)
 	if err != nil {
 		return -1, err
 	}
-	payload, err := asm.NewInputTarStream(decompressed, metadata, storage.NewDiscardFilePutter())
+	uncompressedDigest := digest.Canonical.Digester()
+	uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
+	payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter())
 	if err != nil {
 		return -1, err
 	}
 	size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
+	if err != nil {
+		return -1, err
+	}
 	compressor.Close()
 	if err == nil {
 		if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
@@ -797,15 +958,57 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
 		}
 	}
 
-	if compression != archive.Uncompressed {
-		layer.Flags[compressionFlag] = compression
-	} else {
-		delete(layer.Flags, compressionFlag)
+	updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
+		var newList []string
+		if oldvalue != "" {
+			for _, value := range (*m)[oldvalue] {
+				if value != id {
+					newList = append(newList, value)
+				}
+			}
+			if len(newList) > 0 {
+				(*m)[oldvalue] = newList
+			} else {
+				delete(*m, oldvalue)
+			}
+		}
+		if newvalue != "" {
+			(*m)[newvalue] = append((*m)[newvalue], id)
+		}
 	}
+	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
+	layer.CompressedDigest = compressedDigest.Digest()
+	layer.CompressedSize = compressedCounter.Count
+	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
+	layer.UncompressedDigest = uncompressedDigest.Digest()
+	layer.UncompressedSize = uncompressedCounter.Count
+	layer.CompressionType = compression
 
 	err = r.Save()
 
 	return size, err
 }
 
+func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
+	var layers []Layer
+	for _, layerID := range m[d] {
+		layer, ok := r.lookup(layerID)
+		if !ok {
+			return nil, ErrLayerUnknown
+		}
+		layers = append(layers, *layer)
+	}
+	return layers, nil
+}
+
+func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.bycompressedsum, d)
+}
+
+func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.byuncompressedsum, d)
+}
+
 func (r *layerStore) Lock() {
 	r.lockfile.Lock()
 }
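The digest bookkeeping in ApplyDiff hinges on io.TeeReader feeding a digester as a side effect of the normal read path. A standard-library-plus-go-digest sketch of the same pattern:

    package main

    import (
    	"fmt"
    	"io"
    	"os"

    	digest "github.com/opencontainers/go-digest"
    )

    // digestWhileCopying hashes everything that flows from src to dst, the
    // same way ApplyDiff tees its compressed and uncompressed streams into
    // digesters while the tar contents are being consumed.
    func digestWhileCopying(dst io.Writer, src io.Reader) (digest.Digest, int64, error) {
    	digester := digest.Canonical.Digester()
    	n, err := io.Copy(dst, io.TeeReader(src, digester.Hash()))
    	return digester.Digest(), n, err
    }

    func main() {
    	d, n, err := digestWhileCopying(io.Discard, os.Stdin)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	fmt.Printf("%s (%d bytes)\n", d, n)
    }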
@@ -822,6 +1025,10 @@ func (r *layerStore) Modified() (bool, error) {
 	return r.lockfile.Modified()
 }
 
+func (r *layerStore) IsReadWrite() bool {
+	return r.lockfile.IsReadWrite()
+}
+
 func (r *layerStore) TouchedSince(when time.Time) bool {
 	return r.lockfile.TouchedSince(when)
 }