Update containers/image and containers/storage

Bump containers/image to 3d0304a02154dddc8f97cc833aa0861cea5e9ade, and containers/storage to 0d32dfce498e06c132c60dac945081bf44c22464.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>

parent 2fa1f3f74a
commit 0651d3a8de
64 changed files with 4121 additions and 1636 deletions
vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored): 127 lines changed

@@ -3,7 +3,6 @@
package overlay

import (
    "bufio"
    "fmt"
    "io"
    "io/ioutil"
@@ -26,7 +25,6 @@ import (
    "github.com/containers/storage/pkg/locker"
    "github.com/containers/storage/pkg/mount"
    "github.com/containers/storage/pkg/parsers"
    "github.com/containers/storage/pkg/parsers/kernel"
    "github.com/containers/storage/pkg/system"
    units "github.com/docker/go-units"
    "github.com/opencontainers/selinux/go-selinux/label"
@@ -124,22 +122,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
        return nil, err
    }

    if err := supportsOverlay(); err != nil {
        return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs")
    }

    // require kernel 4.0.0 to ensure multiple lower dirs are supported
    v, err := kernel.GetKernelVersion()
    if err != nil {
        return nil, err
    }
    if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
        if !opts.overrideKernelCheck {
            return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
        }
        logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
    }

    fsMagic, err := graphdriver.GetFSMagic(home)
    if err != nil {
        return nil, err
@@ -153,22 +135,18 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
    case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
        logrus.Errorf("'overlay' is not supported over %s", backingFs)
        return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
    case graphdriver.FsMagicBtrfs:
        // Support for OverlayFS on BTRFS was added in kernel 4.7
        // See https://btrfs.wiki.kernel.org/index.php/Changelog
        if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
            if !opts.overrideKernelCheck {
                logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs)
                return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs)
            }
            logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update")
        }
    }

    rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
    if err != nil {
        return nil, err
    }

    supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID)
    if err != nil {
        return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs")
    }

    // Create the driver home dir
    if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
        return nil, err
@@ -178,16 +156,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
        return nil, err
    }

    supportsDType, err := fsutils.SupportsDType(home)
    if err != nil {
        return nil, err
    }
    if !supportsDType {
        logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
        // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
        // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
    }

    d := &Driver{
        name: "overlay",
        home: home,
@@ -210,10 +178,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
        }
    } else if opts.quota.Size > 0 {
        // if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
        return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs)
        return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
    }

    logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
    logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)

    return d, nil
}
@@ -227,20 +195,20 @@ func parseOptions(options []string) (*overlayOptions, error) {
        }
        key = strings.ToLower(key)
        switch key {
        case "overlay.override_kernel_check", "overlay2.override_kernel_check":
        case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
            logrus.Debugf("overlay: override_kernelcheck=%s", val)
            o.overrideKernelCheck, err = strconv.ParseBool(val)
            if err != nil {
                return nil, err
            }
        case "overlay.size", "overlay2.size":
        case ".size", "overlay.size", "overlay2.size":
            logrus.Debugf("overlay: size=%s", val)
            size, err := units.RAMInBytes(val)
            if err != nil {
                return nil, err
            }
            o.quota.Size = uint64(size)
        case "overlay.imagestore", "overlay2.imagestore":
        case ".imagestore", "overlay.imagestore", "overlay2.imagestore":
            logrus.Debugf("overlay: imagestore=%s", val)
            // Additional read only image stores to use for lower paths
            for _, store := range strings.Split(val, ",") {
@@ -264,25 +232,59 @@ func parseOptions(options []string) (*overlayOptions, error) {
    return o, nil
}

func supportsOverlay() error {
    // We can try to modprobe overlay first before looking at
    // proc/filesystems for when overlay is supported
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
    // We can try to modprobe overlay first
    exec.Command("modprobe", "overlay").Run()

    f, err := os.Open("/proc/filesystems")
    if err != nil {
        return err
    }
    defer f.Close()

    s := bufio.NewScanner(f)
    for s.Scan() {
        if s.Text() == "nodev\toverlay" {
            return nil
    layerDir, err := ioutil.TempDir(home, "compat")
    if err == nil {
        // Check if reading the directory's contents populates the d_type field, which is required
        // for proper operation of the overlay filesystem.
        supportsDType, err = fsutils.SupportsDType(layerDir)
        if err != nil {
            return false, err
        }
        if !supportsDType {
            logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
            // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
            // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
        }

        // Try a test mount in the specific location we're looking at using.
        mergedDir := filepath.Join(layerDir, "merged")
        lower1Dir := filepath.Join(layerDir, "lower1")
        lower2Dir := filepath.Join(layerDir, "lower2")
        defer func() {
            // Permitted to fail, since the various subdirectories
            // can be empty or not even there, and the home might
            // legitimately be not empty
            _ = unix.Unmount(mergedDir, unix.MNT_DETACH)
            _ = os.RemoveAll(layerDir)
            _ = os.Remove(home)
        }()
        _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
        _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
        _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
        flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir)
        if len(flags) < unix.Getpagesize() {
            if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil {
                logrus.Debugf("overlay test mount with multiple lowers succeeded")
                return supportsDType, nil
            }
        }
        flags = fmt.Sprintf("lowerdir=%s", lower1Dir)
        if len(flags) < unix.Getpagesize() {
            if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil {
                logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower")
                return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
            }
        }
        logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home)
        return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
    }

    logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
    return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
    return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
}

func useNaiveDiff(home string) bool {
@@ -650,10 +652,21 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
func (d *Driver) Put(id string) error {
    d.locker.Lock(id)
    defer d.locker.Unlock(id)
    dir := d.dir(id)
    if _, err := os.Stat(dir); err != nil {
        return err
    }
    mountpoint := path.Join(d.dir(id), "merged")
    if count := d.ctr.Decrement(mountpoint); count > 0 {
        return nil
    }
    if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil {
        // If no lower, we used the diff directory, so no work to do
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }
    if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
        logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
    }
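The overlay driver change above replaces the old /proc/filesystems scan with an actual test mount: the new supportsOverlay() creates scratch lower and merged directories under the driver home, checks d_type support there, and only reports success if an overlay mount with multiple lowerdirs works. What follows is a minimal, self-contained sketch of that probing idea; it is not the vendored code, and the probeOverlay name, the scratch-directory layout, and the use of a plain unix.Mount in place of the driver's mountFrom helper are illustrative assumptions. Running it requires CAP_SYS_ADMIN.

// probe_overlay.go: hedged sketch of probing overlay support via a real test mount.
package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "golang.org/x/sys/unix"
)

func probeOverlay(scratch string) error {
    // Create a throwaway directory tree to mount over.
    dir, err := ioutil.TempDir(scratch, "overlay-probe")
    if err != nil {
        return err
    }
    defer os.RemoveAll(dir)

    merged := filepath.Join(dir, "merged")
    lower1 := filepath.Join(dir, "lower1")
    lower2 := filepath.Join(dir, "lower2")
    for _, d := range []string{merged, lower1, lower2} {
        if err := os.MkdirAll(d, 0700); err != nil {
            return err
        }
    }

    // A read-only overlay mount is permitted with multiple lowerdirs and no upperdir,
    // which is exactly the "multiple lowers" capability the driver depends on.
    opts := fmt.Sprintf("lowerdir=%s:%s", lower1, lower2)
    if err := unix.Mount("overlay", merged, "overlay", 0, opts); err != nil {
        return fmt.Errorf("kernel does not support overlay with multiple lowers: %v", err)
    }
    // Detach immediately; the probe only needs to know that the mount succeeded.
    return unix.Unmount(merged, unix.MNT_DETACH)
}

func main() {
    if err := probeOverlay(os.TempDir()); err != nil {
        fmt.Println("overlay probe failed:", err)
        return
    }
    fmt.Println("overlay with multiple lowers is supported")
}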
vendor/github.com/containers/storage/drivers/vfs/driver.go (generated, vendored): 5 lines changed

@@ -36,6 +36,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
    for _, option := range options {
        if strings.HasPrefix(option, "vfs.imagestore=") {
            d.homes = append(d.homes, strings.Split(option[15:], ",")...)
            continue
        }
        if strings.HasPrefix(option, ".imagestore=") {
            d.homes = append(d.homes, strings.Split(option[12:], ",")...)
            continue
        }
    }
    return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
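The vfs change above makes the driver accept the imagestore option both with and without a driver prefix ("vfs.imagestore=" and ".imagestore="), splitting the value into additional read-only home directories. A small illustrative sketch of that prefix handling follows; the extraImageStores helper is hypothetical and stands in for the loop inside the vendored Init:

// prefix_options.go: hedged sketch of splitting "<driver>.imagestore=" options.
package main

import (
    "fmt"
    "strings"
)

// extraImageStores collects comma-separated read-only store paths from either
// the prefixed or the unprefixed form of the imagestore option.
func extraImageStores(options []string) []string {
    var homes []string
    for _, option := range options {
        for _, prefix := range []string{"vfs.imagestore=", ".imagestore="} {
            if strings.HasPrefix(option, prefix) {
                homes = append(homes, strings.Split(option[len(prefix):], ",")...)
                break
            }
        }
    }
    return homes
}

func main() {
    opts := []string{"vfs.imagestore=/var/lib/ro-store1,/var/lib/ro-store2"}
    fmt.Println(extraImageStores(opts)) // [/var/lib/ro-store1 /var/lib/ro-store2]
}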
vendor/github.com/containers/storage/images.go (generated, vendored): 103 lines changed

@@ -14,12 +14,22 @@ import (
    "github.com/pkg/errors"
)

const (
    // ImageDigestBigDataKey is the name of the big data item whose
    // contents we consider useful for computing a "digest" of the
    // image, by which we can locate the image later.
    ImageDigestBigDataKey = "manifest"
)

// An Image is a reference to a layer and an associated metadata string.
type Image struct {
    // ID is either one which was specified at create-time, or a random
    // value which was generated by the library.
    ID string `json:"id"`

    // Digest is a digest value that we can use to locate the image.
    Digest digest.Digest `json:"digest,omitempty"`

    // Names is an optional set of user-defined convenience values. The
    // image can be referred to by its ID or any of its names. Names are
    // unique among images.
@@ -28,7 +38,7 @@ type Image struct {
    // TopLayer is the ID of the topmost layer of the image itself, if the
    // image contains one or more layers. Multiple images can refer to the
    // same top layer.
    TopLayer string `json:"layer"`
    TopLayer string `json:"layer,omitempty"`

    // Metadata is data we keep for the convenience of the caller. It is not
    // expected to be large, since it is kept in memory.
@@ -74,6 +84,10 @@ type ROImageStore interface {

    // Images returns a slice enumerating the known images.
    Images() ([]Image, error)

    // Images returns a slice enumerating the images which have a big data
    // item with the name ImageDigestBigDataKey and the specified digest.
    ByDigest(d digest.Digest) ([]*Image, error)
}

// ImageStore provides bookkeeping for information about Images.
@@ -87,7 +101,7 @@ type ImageStore interface {
    // Create creates an image that has a specified ID (or a random one) and
    // optional names, using the specified layer as its topmost (hopefully
    // read-only) layer. That layer can be referenced by multiple images.
    Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
    Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

    // SetNames replaces the list of names associated with an image with the
    // supplied values.
@@ -107,6 +121,7 @@ type imageStore struct {
    idindex  *truncindex.TruncIndex
    byid     map[string]*Image
    byname   map[string]*Image
    bydigest map[digest.Digest][]*Image
}

func (r *imageStore) Images() ([]Image, error) {
@@ -140,6 +155,7 @@ func (r *imageStore) Load() error {
    idlist := []string{}
    ids := make(map[string]*Image)
    names := make(map[string]*Image)
    digests := make(map[digest.Digest][]*Image)
    if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
        idlist = make([]string, 0, len(images))
        for n, image := range images {
@@ -152,6 +168,16 @@ func (r *imageStore) Load() error {
                }
                names[name] = images[n]
            }
            // Implicit digest
            if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
                digests[digest] = append(digests[digest], images[n])
            }
            // Explicit digest
            if image.Digest == "" {
                image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
            } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
                digests[image.Digest] = append(digests[image.Digest], images[n])
            }
        }
    }
    if shouldSave && !r.IsReadWrite() {
@@ -161,6 +187,7 @@ func (r *imageStore) Load() error {
    r.idindex = truncindex.NewTruncIndex(idlist)
    r.byid = ids
    r.byname = names
    r.bydigest = digests
    if shouldSave {
        return r.Save()
    }
@@ -199,6 +226,7 @@ func newImageStore(dir string) (ImageStore, error) {
        images:   []*Image{},
        byid:     make(map[string]*Image),
        byname:   make(map[string]*Image),
        bydigest: make(map[digest.Digest][]*Image),
    }
    if err := istore.Load(); err != nil {
        return nil, err
@@ -219,6 +247,7 @@ func newROImageStore(dir string) (ROImageStore, error) {
        images:   []*Image{},
        byid:     make(map[string]*Image),
        byname:   make(map[string]*Image),
        bydigest: make(map[digest.Digest][]*Image),
    }
    if err := istore.Load(); err != nil {
        return nil, err
@@ -265,7 +294,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
    return r.Save()
}

func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
    if !r.IsReadWrite() {
        return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
    }
@@ -292,6 +321,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
    if err == nil {
        image = &Image{
            ID:       id,
            Digest:   searchableDigest,
            Names:    names,
            TopLayer: layer,
            Metadata: metadata,
@@ -304,6 +334,10 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
        r.images = append(r.images, image)
        r.idindex.Add(id)
        r.byid[id] = image
        if searchableDigest != "" {
            list := r.bydigest[searchableDigest]
            r.bydigest[searchableDigest] = append(list, image)
        }
        for _, name := range names {
            r.byname[name] = image
        }
@@ -383,6 +417,28 @@ func (r *imageStore) Delete(id string) error {
            r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
        }
    }
    if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
        // remove the image from the digest-based index
        if list, ok := r.bydigest[digest]; ok {
            prunedList := imageSliceWithoutValue(list, image)
            if len(prunedList) == 0 {
                delete(r.bydigest, digest)
            } else {
                r.bydigest[digest] = prunedList
            }
        }
    }
    if image.Digest != "" {
        // remove the image's hard-coded digest from the digest-based index
        if list, ok := r.bydigest[image.Digest]; ok {
            prunedList := imageSliceWithoutValue(list, image)
            if len(prunedList) == 0 {
                delete(r.bydigest, image.Digest)
            } else {
                r.bydigest[image.Digest] = prunedList
            }
        }
    }
    if err := r.Save(); err != nil {
        return err
    }
@@ -411,6 +467,13 @@ func (r *imageStore) Exists(id string) bool {
    return ok
}

func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
    if images, ok := r.bydigest[d]; ok {
        return images, nil
    }
    return nil, ErrImageUnknown
}

func (r *imageStore) BigData(id, key string) ([]byte, error) {
    if key == "" {
        return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
@@ -486,6 +549,17 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
    return image.BigDataNames, nil
}

func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
    modified := make([]*Image, 0, len(slice))
    for _, v := range slice {
        if v == value {
            continue
        }
        modified = append(modified, v)
    }
    return modified
}

func (r *imageStore) SetBigData(id, key string, data []byte) error {
    if key == "" {
        return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
@@ -528,6 +602,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
        image.BigDataNames = append(image.BigDataNames, key)
        save = true
    }
    if key == ImageDigestBigDataKey {
        if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
            // remove the image from the list of images in the digest-based
            // index which corresponds to the old digest for this item, unless
            // it's also the hard-coded digest
            if list, ok := r.bydigest[oldDigest]; ok {
                prunedList := imageSliceWithoutValue(list, image)
                if len(prunedList) == 0 {
                    delete(r.bydigest, oldDigest)
                } else {
                    r.bydigest[oldDigest] = prunedList
                }
            }
        }
        // add the image to the list of images in the digest-based index which
        // corresponds to the new digest for this item, unless it's already there
        list := r.bydigest[newDigest]
        if len(list) == len(imageSliceWithoutValue(list, image)) {
            // the list isn't shortened by trying to prune this image from it,
            // so it's not in there yet
            r.bydigest[newDigest] = append(list, image)
        }
    }
    if save {
        err = r.Save()
    }
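The images.go changes above give Image an optional Digest field and the imageStore a bydigest map, and keep that map in sync in Load(), Create(), Delete(), and SetBigData(): an image is indexed under the digest of its "manifest" big data item and under any explicitly assigned digest, and empty buckets are dropped. Below is a condensed sketch of that bookkeeping pattern, using hypothetical stand-in types rather than the vendored imageStore:

// digest_index.go: hedged sketch of a digest-to-images index with add/remove upkeep.
package main

import "fmt"

type image struct {
    ID     string
    Digest string
}

type index struct {
    byDigest map[string][]*image
}

// add indexes img under digest d, skipping empty digests and duplicates,
// mirroring the "unless it's already there" check in SetBigData above.
func (ix *index) add(d string, img *image) {
    if d == "" {
        return
    }
    for _, existing := range ix.byDigest[d] {
        if existing == img {
            return
        }
    }
    ix.byDigest[d] = append(ix.byDigest[d], img)
}

// remove prunes img from the bucket for d and deletes the bucket when it
// becomes empty, as Delete() does above.
func (ix *index) remove(d string, img *image) {
    var pruned []*image
    for _, existing := range ix.byDigest[d] {
        if existing != img {
            pruned = append(pruned, existing)
        }
    }
    if len(pruned) == 0 {
        delete(ix.byDigest, d)
    } else {
        ix.byDigest[d] = pruned
    }
}

func main() {
    ix := &index{byDigest: map[string][]*image{}}
    img := &image{ID: "abc", Digest: "sha256:1234"}
    ix.add(img.Digest, img)
    fmt.Println(len(ix.byDigest["sha256:1234"])) // 1
    ix.remove(img.Digest, img)
    fmt.Println(len(ix.byDigest["sha256:1234"])) // 0
}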
vendor/github.com/containers/storage/images_ffjson.go (generated, vendored): 60 lines changed

@@ -38,6 +38,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
    buf.WriteString(`{ "id":`)
    fflib.WriteJsonString(buf, string(j.ID))
    buf.WriteByte(',')
    if len(j.Digest) != 0 {
        buf.WriteString(`"digest":`)
        fflib.WriteJsonString(buf, string(j.Digest))
        buf.WriteByte(',')
    }
    if len(j.Names) != 0 {
        buf.WriteString(`"names":`)
        if j.Names != nil {
@@ -54,9 +59,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
        }
        buf.WriteByte(',')
    }
    buf.WriteString(`"layer":`)
    fflib.WriteJsonString(buf, string(j.TopLayer))
    buf.WriteByte(',')
    if len(j.TopLayer) != 0 {
        buf.WriteString(`"layer":`)
        fflib.WriteJsonString(buf, string(j.TopLayer))
        buf.WriteByte(',')
    }
    if len(j.Metadata) != 0 {
        buf.WriteString(`"metadata":`)
        fflib.WriteJsonString(buf, string(j.Metadata))
@@ -144,6 +151,8 @@ const (

    ffjtImageID

    ffjtImageDigest

    ffjtImageNames

    ffjtImageTopLayer
@@ -163,6 +172,8 @@

var ffjKeyImageID = []byte("id")

var ffjKeyImageDigest = []byte("digest")

var ffjKeyImageNames = []byte("names")

var ffjKeyImageTopLayer = []byte("layer")
@@ -266,6 +277,14 @@ mainparse:
                goto mainparse
            }

        case 'd':

            if bytes.Equal(ffjKeyImageDigest, kn) {
                currentKey = ffjtImageDigest
                state = fflib.FFParse_want_colon
                goto mainparse
            }

        case 'f':

            if bytes.Equal(ffjKeyImageFlags, kn) {
@@ -356,6 +375,12 @@ mainparse:
                goto mainparse
            }

            if fflib.EqualFoldRight(ffjKeyImageDigest, kn) {
                currentKey = ffjtImageDigest
                state = fflib.FFParse_want_colon
                goto mainparse
            }

            if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) {
                currentKey = ffjtImageID
                state = fflib.FFParse_want_colon
@@ -382,6 +407,9 @@ mainparse:
        case ffjtImageID:
            goto handle_ID

        case ffjtImageDigest:
            goto handle_Digest

        case ffjtImageNames:
            goto handle_Names

@@ -446,6 +474,32 @@ handle_ID:
    state = fflib.FFParse_after_value
    goto mainparse

handle_Digest:

    /* handler: j.Digest type=digest.Digest kind=string quoted=false*/

    {

        {
            if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
                return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
            }
        }

        if tok == fflib.FFTok_null {

        } else {

            outBuf := fs.Output.Bytes()

            j.Digest = digest.Digest(string(outBuf))

        }
    }

    state = fflib.FFParse_after_value
    goto mainparse

handle_Names:

    /* handler: j.Names type=[]string kind=slice quoted=false*/
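images_ffjson.go is the ffjson-generated marshaler for the Image type in images.go, regenerated here so that the new digest field round-trips and "layer" is emitted only when TopLayer is set. The short example below uses the standard encoding/json package rather than ffjson to illustrate the behavior the omitempty tags and the regenerated code produce; the struct is a trimmed stand-in, not the vendored type:

// omitempty_demo.go: how the new json tags affect serialized output.
package main

import (
    "encoding/json"
    "fmt"
)

type Image struct {
    ID       string `json:"id"`
    Digest   string `json:"digest,omitempty"`
    TopLayer string `json:"layer,omitempty"`
}

func main() {
    full, _ := json.Marshal(Image{ID: "abc", Digest: "sha256:1234", TopLayer: "layer1"})
    sparse, _ := json.Marshal(Image{ID: "def"})
    fmt.Println(string(full))   // {"id":"abc","digest":"sha256:1234","layer":"layer1"}
    fmt.Println(string(sparse)) // {"id":"def"}
}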
vendor/github.com/containers/storage/store.go (generated, vendored): 50 lines changed

@@ -370,6 +370,10 @@ type Store interface {
    // and may have different metadata, big data items, and flags.
    ImagesByTopLayer(id string) ([]*Image, error)

    // ImagesByDigest returns a list of images which contain a big data item
    // named ImageDigestBigDataKey whose contents have the specified digest.
    ImagesByDigest(d digest.Digest) ([]*Image, error)

    // Container returns a specific container.
    Container(id string) (*Container, error)

@@ -430,6 +434,8 @@ type ImageOptions struct {
    // CreationDate, if not zero, will override the default behavior of marking the image as having been
    // created when CreateImage() was called, recording CreationDate instead.
    CreationDate time.Time
    // Digest is a hard-coded digest value that we can use to look up the image. It is optional.
    Digest digest.Digest
}

// ContainerOptions is used for passing options to a Store's CreateContainer() method.
@@ -487,11 +493,6 @@ func GetStore(options StoreOptions) (Store, error) {
    if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
        return nil, err
    }
    for _, subdir := range []string{} {
        if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) {
            return nil, err
        }
    }
    if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
        return nil, err
    }
@@ -834,11 +835,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
    }

    creationDate := time.Now().UTC()
    if options != nil {
    if options != nil && !options.CreationDate.IsZero() {
        creationDate = options.CreationDate
    }

    return ristore.Create(id, names, layer, metadata, creationDate)
    return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}

func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
@@ -1888,10 +1889,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
        }
        storeLayers, err := m(store, d)
        if err != nil {
            return nil, err
            if errors.Cause(err) != ErrLayerUnknown {
                return nil, err
            }
            continue
        }
        layers = append(layers, storeLayers...)
    }
    if len(layers) == 0 {
        return nil, ErrLayerUnknown
    }
    return layers, nil
}

@@ -2080,6 +2087,33 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
    return images, nil
}

func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
    images := []*Image{}

    istore, err := s.ImageStore()
    if err != nil {
        return nil, err
    }

    istores, err := s.ROImageStores()
    if err != nil {
        return nil, err
    }
    for _, store := range append([]ROImageStore{istore}, istores...) {
        store.Lock()
        defer store.Unlock()
        if modified, err := store.Modified(); modified || err != nil {
            store.Load()
        }
        imageList, err := store.ByDigest(d)
        if err != nil && err != ErrImageUnknown {
            return nil, err
        }
        images = append(images, imageList...)
    }
    return images, nil
}

func (s *store) Container(id string) (*Container, error) {
    rcstore, err := s.ContainerStore()
    if err != nil {
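The store.go changes above expose the digest plumbing end to end: ImageOptions gains an optional Digest, CreateImage() forwards it to the image store, and ImagesByDigest() searches both the read-write and read-only image stores. The usage sketch below is hedged: it assumes only the StoreOptions fields visible in this diff (GraphRoot, RunRoot), the paths and manifest bytes are placeholders, and error handling is abbreviated; it is not an authoritative example from the library's documentation.

// digest_lookup.go: hedged sketch of creating an image with a digest and finding it again.
package main

import (
    "fmt"

    "github.com/containers/storage"
    digest "github.com/opencontainers/go-digest"
)

func main() {
    options := storage.StoreOptions{
        GraphRoot: "/var/lib/containers/storage", // placeholder paths
        RunRoot:   "/var/run/containers/storage",
    }
    store, err := storage.GetStore(options)
    if err != nil {
        panic(err)
    }

    manifest := []byte(`{"schemaVersion": 2}`) // placeholder manifest contents
    dgst := digest.FromBytes(manifest)

    // Digest in ImageOptions is the new, optional "hard-coded" lookup key.
    img, err := store.CreateImage("", []string{"example/image:latest"}, "", "", &storage.ImageOptions{Digest: dgst})
    if err != nil {
        panic(err)
    }

    // ImagesByDigest also matches images whose "manifest" big data item has this digest.
    matches, err := store.ImagesByDigest(dgst)
    if err != nil {
        panic(err)
    }
    fmt.Println(img.ID, len(matches))
}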
vendor/github.com/containers/storage/vendor.conf (generated, vendored): 2 lines changed

@@ -15,7 +15,7 @@ github.com/pmezard/go-difflib v1.0.0
github.com/sirupsen/logrus v1.0.0
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac