Vendor in latest containers/storage

containers/storage has been enhanced to speed up the compiling and loading of JSON files. This should make cri-o a little bit faster.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
parent 774d44589c
commit 70b1661e10

36 changed files with 11686 additions and 56 deletions
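The speedup comes from the generated *_ffjson.go files vendored below: ffjson emits static MarshalJSON/UnmarshalJSON methods for the store's record types, so loading and saving containers.json, images.json, and layers.json no longer goes through encoding/json reflection. A minimal sketch of the pattern, using a hypothetical Record type (not one of the types in this diff); ffjson.Marshal falls back to encoding/json when no generated marshaler is present, so this runs even before code generation:

package main

import (
	"fmt"

	"github.com/pquerna/ffjson/ffjson"
)

// Record stands in for types like storage.Container. Running the ffjson
// tool over this file would generate record_ffjson.go with static
// MarshalJSON/UnmarshalJSON methods for it.
type Record struct {
	ID    string   `json:"id"`
	Names []string `json:"names,omitempty"`
}

func main() {
	data, err := ffjson.Marshal(&Record{ID: "abc", Names: []string{"r1"}})
	if err != nil {
		panic(err)
	}
	var out Record
	if err := ffjson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID, out.Names)
}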
vendor/github.com/containers/storage/containers.go | 101 (generated, vendored)

@@ -10,6 +10,8 @@ import (
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/truncindex"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )
 
 // A Container is a reference to a read-write layer with metadata.
@@ -44,6 +46,10 @@ type Container struct {
 	// that has been stored, if they're known.
 	BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
 
+	// BigDataDigests maps the names in BigDataNames to the digests of the
+	// data that has been stored, if they're known.
+	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
 	// Created is the datestamp for when this container was created. Older
 	// versions of the library did not track this information, so callers
 	// will likely want to use the IsZero() method to verify that a value
@@ -133,6 +139,7 @@ func (r *containerStore) Load() error {
 	ids := make(map[string]*Container)
 	names := make(map[string]*Container)
 	if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
+		idlist = make([]string, 0, len(containers))
 		for n, container := range containers {
 			idlist = append(idlist, container.ID)
 			ids[container.ID] = containers[n]
@@ -223,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
 	if !ok {
 		return ErrContainerUnknown
 	}
+	if container.Flags == nil {
+		container.Flags = make(map[string]interface{})
+	}
 	container.Flags[flag] = value
 	return r.Save()
 }
@@ -247,15 +257,16 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 	}
 	if err == nil {
 		container = &Container{
-			ID:           id,
-			Names:        names,
-			ImageID:      image,
-			LayerID:      layer,
-			Metadata:     metadata,
-			BigDataNames: []string{},
-			BigDataSizes: make(map[string]int64),
-			Created:      time.Now().UTC(),
-			Flags:        make(map[string]interface{}),
+			ID:             id,
+			Names:          names,
+			ImageID:        image,
+			LayerID:        layer,
+			Metadata:       metadata,
+			BigDataNames:   []string{},
+			BigDataSizes:   make(map[string]int64),
+			BigDataDigests: make(map[string]digest.Digest),
+			Created:        time.Now().UTC(),
+			Flags:          make(map[string]interface{}),
 		}
 		r.containers = append(r.containers, container)
 		r.byid[id] = container
@@ -362,6 +373,9 @@ func (r *containerStore) Exists(id string) bool {
 }
 
 func (r *containerStore) BigData(id, key string) ([]byte, error) {
+	if key == "" {
+		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return nil, ErrContainerUnknown
@@ -370,16 +384,61 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
 }
 
 func (r *containerStore) BigDataSize(id, key string) (int64, error) {
+	if key == "" {
+		return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return -1, ErrContainerUnknown
 	}
+	if c.BigDataSizes == nil {
+		c.BigDataSizes = make(map[string]int64)
+	}
 	if size, ok := c.BigDataSizes[key]; ok {
 		return size, nil
 	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return -1, ErrContainerUnknown
+			}
+			if size, ok := c.BigDataSizes[key]; ok {
+				return size, nil
+			}
+		}
+	}
 	return -1, ErrSizeUnknown
 }
 
+func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
+	if key == "" {
+		return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+	}
+	c, ok := r.lookup(id)
+	if !ok {
+		return "", ErrContainerUnknown
+	}
+	if c.BigDataDigests == nil {
+		c.BigDataDigests = make(map[string]digest.Digest)
+	}
+	if d, ok := c.BigDataDigests[key]; ok {
+		return d, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return "", ErrContainerUnknown
+			}
+			if d, ok := c.BigDataDigests[key]; ok {
+				return d, nil
+			}
+		}
+	}
	return "", ErrDigestUnknown
+}
+
 func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	c, ok := r.lookup(id)
 	if !ok {
@@ -389,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
 }
 
 func (r *containerStore) SetBigData(id, key string, data []byte) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return ErrContainerUnknown
@@ -399,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
 	if err == nil {
 		save := false
-		oldSize, ok := c.BigDataSizes[key]
+		if c.BigDataSizes == nil {
+			c.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := c.BigDataSizes[key]
 		c.BigDataSizes[key] = int64(len(data))
-		if !ok || oldSize != c.BigDataSizes[key] {
+		if c.BigDataDigests == nil {
+			c.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := c.BigDataDigests[key]
+		newDigest := digest.Canonical.FromBytes(data)
+		c.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 			save = true
 		}
-		add := true
+		addName := true
 		for _, name := range c.BigDataNames {
 			if name == key {
-				add = false
+				addName = false
 				break
 			}
 		}
-		if add {
+		if addName {
 			c.BigDataNames = append(c.BigDataNames, key)
 			save = true
 		}
@@ -423,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 }
 
 func (r *containerStore) Wipe() error {
-	ids := []string{}
+	ids := make([]string, 0, len(r.byid))
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
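The BigDataSize and BigDataDigest accessors above memoize: on a cache miss they re-read the item with BigData, re-store it with SetBigData (which recomputes both size and digest), and retry the lookup, so records written by older library versions get their caches backfilled on first use. A standalone sketch of that backfill pattern with hypothetical names (the real methods also take locks and persist to disk):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

type store struct {
	blobs   map[string][]byte
	digests map[string]digest.Digest
}

func (s *store) setBigData(key string, data []byte) {
	s.blobs[key] = data
	// Re-storing recomputes the cached digest, as SetBigData does in the diff.
	s.digests[key] = digest.Canonical.FromBytes(data)
}

func (s *store) bigDataDigest(key string) (digest.Digest, error) {
	if d, ok := s.digests[key]; ok {
		return d, nil // fast path: already cached
	}
	// Slow path: records written before digests were tracked lack a cached
	// value, so re-save the data once to backfill the cache, then retry.
	if data, ok := s.blobs[key]; ok {
		s.setBigData(key, data)
		return s.digests[key], nil
	}
	return "", fmt.Errorf("digest unknown for %q", key)
}

func main() {
	s := &store{
		blobs:   map[string][]byte{"config": []byte(`{}`)},
		digests: map[string]digest.Digest{},
	}
	d, err := s.bigDataDigest("config") // miss, backfill, then hit
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}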
vendor/github.com/containers/storage/containers_ffjson.go | 1194 (generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/containers/storage/drivers/fsdiff.go | 2 (generated, vendored)

@@ -94,7 +94,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
 		// are extracted from tar's with full second precision on modified time.
 		// We need this hack here to make sure calls within same second receive
 		// correct result.
-		time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
+		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
 		return err
 	}), nil
 }
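The two Sleep expressions in this hunk are equivalent: time.Until(t) is defined as t.Sub(time.Now()), but time.Until only exists since Go 1.8, so the .Sub(time.Now()) spelling also builds on older toolchains. A tiny sketch of the "sleep to the next whole second" trick the comment describes:

package main

import (
	"fmt"
	"time"
)

func main() {
	startTime := time.Now()
	// Round up to the next whole second, mirroring the sleep in Diff().
	next := startTime.Truncate(time.Second).Add(time.Second)
	time.Sleep(next.Sub(time.Now())) // equivalent to time.Sleep(time.Until(next))
	fmt.Println("woke at", time.Now().Format(time.RFC3339Nano))
}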
vendor/github.com/containers/storage/drivers/overlay/check.go | 31 (generated, vendored)

@@ -8,6 +8,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"syscall"
 
 	"github.com/containers/storage/pkg/system"
 	"github.com/pkg/errors"
@@ -15,10 +16,11 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-// hasOpaqueCopyUpBug checks whether the filesystem has a bug
+// doesSupportNativeDiff checks whether the filesystem has a bug
 // which copies up the opaque flag when copying up an opaque
-// directory. When this bug exists naive diff should be used.
-func hasOpaqueCopyUpBug(d string) error {
+// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When these exist naive diff should be used.
+func doesSupportNativeDiff(d string) error {
 	td, err := ioutil.TempDir(d, "opaque-bug-check")
 	if err != nil {
 		return err
@@ -29,10 +31,13 @@ func hasOpaqueCopyUpBug(d string) error {
 		}
 	}()
 
-	// Make directories l1/d, l2/d, l3, work, merged
+	// Make directories l1/d, l1/d1, l2/d, l3, work, merged
 	if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
 		return err
 	}
+	if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+		return err
+	}
 	if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
 		return err
 	}
@@ -75,5 +80,23 @@ func hasOpaqueCopyUpBug(d string) error {
 		return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
 	}
 
+	// rename "d1" to "d2"
+	if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
+		// if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
+		if err.(*os.LinkError).Err == syscall.EXDEV {
+			return nil
+		}
+		return errors.Wrap(err, "failed to rename dir in merged directory")
+	}
+	// get the xattr of "d2"
+	xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
+	if err != nil {
+		return errors.Wrap(err, "failed to read redirect flag on upper layer")
+	}
+
+	if string(xattrRedirect) == "d1" {
+		return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
+	}
+
 	return nil
 }
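The new branch of the check probes the kernel's overlayfs redirect_dir feature: renaming a lower-layer directory inside the merged mount either fails with EXDEV (feature off) or records the old name in a trusted.overlay.redirect xattr on the upper layer (feature on), in which case native diff would miss the rename and naive diff must be used. A minimal Linux-only sketch of reading that xattr with golang.org/x/sys/unix; the path is a placeholder and reading trusted.* xattrs requires CAP_SYS_ADMIN:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Path is a placeholder for the upper layer of a test overlay mount.
	buf := make([]byte, 128)
	n, err := unix.Lgetxattr("/var/lib/storage/check/l3/d2", "trusted.overlay.redirect", buf)
	if err != nil {
		fmt.Println("no redirect xattr (or not permitted):", err)
		return
	}
	// A value of "d1" means the kernel recorded the rename via redirect_dir.
	fmt.Printf("redirect xattr: %q\n", string(buf[:n]))
}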
vendor/github.com/containers/storage/drivers/overlay/mount.go | 1 (generated, vendored)

@@ -49,7 +49,6 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
 	output := bytes.NewBuffer(nil)
 	cmd.Stdout = output
 	cmd.Stderr = output
-
 	if err := cmd.Start(); err != nil {
 		w.Close()
 		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
vendor/github.com/containers/storage/drivers/overlay/overlay.go | 9 (generated, vendored)

@@ -228,7 +228,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
 		key = strings.ToLower(key)
 		switch key {
 		case "overlay.override_kernel_check", "overlay2.override_kernel_check":
-			logrus.Debugf("overlay: overide_kernelcheck=%s", val)
+			logrus.Debugf("overlay: override_kernelcheck=%s", val)
 			o.overrideKernelCheck, err = strconv.ParseBool(val)
 			if err != nil {
 				return nil, err
@@ -287,8 +287,8 @@ func supportsOverlay() error {
 
 func useNaiveDiff(home string) bool {
 	useNaiveDiffLock.Do(func() {
-		if err := hasOpaqueCopyUpBug(home); err != nil {
-			logrus.Warnf("Not using native diff for overlay: %v", err)
+		if err := doesSupportNativeDiff(home); err != nil {
+			logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
 			useNaiveDiffOnly = true
 		}
 	})
@@ -654,8 +654,7 @@ func (d *Driver) Put(id string) error {
 	if count := d.ctr.Decrement(mountpoint); count > 0 {
 		return nil
 	}
-	err := unix.Unmount(mountpoint, unix.MNT_DETACH)
-	if err != nil {
+	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
 		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
 	}
 	return nil
vendor/github.com/containers/storage/drivers/vfs/driver.go | 25 (generated, vendored)

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/chrootarchive"
@@ -25,13 +26,18 @@ func init() {
 // This sets the home directory for the driver and returns NaiveDiffDriver.
 func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 	d := &Driver{
-		home:       home,
+		homes:      []string{home},
 		idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
 	}
 	rootIDs := d.idMappings.RootPair()
 	if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
 		return nil, err
 	}
+	for _, option := range options {
+		if strings.HasPrefix(option, "vfs.imagestore=") {
+			d.homes = append(d.homes, strings.Split(option[15:], ",")...)
+		}
+	}
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
 }
 
@@ -40,7 +46,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
 // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
 type Driver struct {
-	home       string
+	homes      []string
 	idMappings *idtools.IDMappings
 }
 
@@ -98,7 +104,17 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
 }
 
 func (d *Driver) dir(id string) string {
-	return filepath.Join(d.home, "dir", filepath.Base(id))
+	for i, home := range d.homes {
+		if i > 0 {
+			home = filepath.Join(home, d.String())
+		}
+		candidate := filepath.Join(home, "dir", filepath.Base(id))
+		fi, err := os.Stat(candidate)
+		if err == nil && fi.IsDir() {
+			return candidate
+		}
+	}
+	return filepath.Join(d.homes[0], "dir", filepath.Base(id))
 }
 
 // Remove deletes the content from the directory for a given id.
@@ -132,5 +148,8 @@ func (d *Driver) Exists(id string) bool {
 
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
+	if len(d.homes) > 1 {
+		return d.homes[1:]
+	}
 	return nil
 }
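Init now accepts a vfs.imagestore= option naming extra read-only image store locations; the slice expression option[15:] strips the 15-byte "vfs.imagestore=" prefix before splitting on commas. A standalone sketch of that parsing, with a hypothetical helper name and placeholder paths:

package main

import (
	"fmt"
	"strings"
)

// parseHomes is a hypothetical helper mirroring the loop added to Init:
// the primary home comes first, then any comma-separated paths from
// "vfs.imagestore=" options (len("vfs.imagestore=") == 15).
func parseHomes(primary string, options []string) []string {
	homes := []string{primary}
	for _, option := range options {
		if strings.HasPrefix(option, "vfs.imagestore=") {
			homes = append(homes, strings.Split(option[len("vfs.imagestore="):], ",")...)
		}
	}
	return homes
}

func main() {
	homes := parseHomes("/var/lib/storage/vfs",
		[]string{"vfs.imagestore=/usr/share/storage,/mnt/images"})
	fmt.Println(homes) // [/var/lib/storage/vfs /usr/share/storage /mnt/images]
}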
vendor/github.com/containers/storage/errors.go | 4 (generated, vendored)

@@ -49,4 +49,8 @@ var (
 	ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
 	// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
 	ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
+	// ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty.
+	ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
+	// ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
+	ErrDigestUnknown = errors.New("could not compute digest of item")
 )
vendor/github.com/containers/storage/images.go | 98 (generated, vendored)

@@ -10,6 +10,7 @@ import (
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/truncindex"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
 
@@ -42,6 +43,10 @@ type Image struct {
 	// that has been stored, if they're known.
 	BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
 
+	// BigDataDigests maps the names in BigDataNames to the digests of the
+	// data that has been stored, if they're known.
+	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
 	// Created is the datestamp for when this image was created. Older
 	// versions of the library did not track this information, so callers
 	// will likely want to use the IsZero() method to verify that a value
@@ -136,6 +141,7 @@ func (r *imageStore) Load() error {
 	ids := make(map[string]*Image)
 	names := make(map[string]*Image)
 	if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
+		idlist = make([]string, 0, len(images))
 		for n, image := range images {
 			ids[image.ID] = images[n]
 			idlist = append(idlist, image.ID)
@@ -252,6 +258,9 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
 	if !ok {
 		return ErrImageUnknown
 	}
+	if image.Flags == nil {
+		image.Flags = make(map[string]interface{})
+	}
 	image.Flags[flag] = value
 	return r.Save()
 }
@@ -282,14 +291,15 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
 	}
 	if err == nil {
 		image = &Image{
-			ID:           id,
-			Names:        names,
-			TopLayer:     layer,
-			Metadata:     metadata,
-			BigDataNames: []string{},
-			BigDataSizes: make(map[string]int64),
-			Created:      created,
-			Flags:        make(map[string]interface{}),
+			ID:             id,
+			Names:          names,
+			TopLayer:       layer,
+			Metadata:       metadata,
+			BigDataNames:   []string{},
+			BigDataSizes:   make(map[string]int64),
+			BigDataDigests: make(map[string]digest.Digest),
+			Created:        created,
+			Flags:          make(map[string]interface{}),
 		}
 		r.images = append(r.images, image)
 		r.idindex.Add(id)
@@ -402,6 +412,9 @@ func (r *imageStore) Exists(id string) bool {
 }
 
 func (r *imageStore) BigData(id, key string) ([]byte, error) {
+	if key == "" {
+		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+	}
 	image, ok := r.lookup(id)
 	if !ok {
 		return nil, ErrImageUnknown
@@ -410,16 +423,61 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
 }
 
 func (r *imageStore) BigDataSize(id, key string) (int64, error) {
+	if key == "" {
+		return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+	}
 	image, ok := r.lookup(id)
 	if !ok {
 		return -1, ErrImageUnknown
 	}
+	if image.BigDataSizes == nil {
+		image.BigDataSizes = make(map[string]int64)
+	}
 	if size, ok := image.BigDataSizes[key]; ok {
 		return size, nil
 	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			image, ok := r.lookup(id)
+			if !ok {
+				return -1, ErrImageUnknown
+			}
+			if size, ok := image.BigDataSizes[key]; ok {
+				return size, nil
+			}
+		}
+	}
 	return -1, ErrSizeUnknown
 }
 
+func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
+	if key == "" {
+		return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return "", ErrImageUnknown
+	}
+	if image.BigDataDigests == nil {
+		image.BigDataDigests = make(map[string]digest.Digest)
+	}
+	if d, ok := image.BigDataDigests[key]; ok {
+		return d, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			image, ok := r.lookup(id)
+			if !ok {
+				return "", ErrImageUnknown
+			}
+			if d, ok := image.BigDataDigests[key]; ok {
+				return d, nil
+			}
+		}
+	}
	return "", ErrDigestUnknown
+}
+
 func (r *imageStore) BigDataNames(id string) ([]string, error) {
 	image, ok := r.lookup(id)
 	if !ok {
@@ -429,6 +487,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
 }
 
 func (r *imageStore) SetBigData(id, key string, data []byte) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+	}
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
 	}
@@ -441,20 +502,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
 	}
 	err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
 	if err == nil {
-		add := true
 		save := false
-		oldSize, ok := image.BigDataSizes[key]
+		if image.BigDataSizes == nil {
+			image.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := image.BigDataSizes[key]
 		image.BigDataSizes[key] = int64(len(data))
-		if !ok || oldSize != image.BigDataSizes[key] {
+		if image.BigDataDigests == nil {
+			image.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := image.BigDataDigests[key]
+		newDigest := digest.Canonical.FromBytes(data)
+		image.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 			save = true
 		}
+		addName := true
 		for _, name := range image.BigDataNames {
 			if name == key {
-				add = false
+				addName = false
 				break
 			}
 		}
-		if add {
+		if addName {
 			image.BigDataNames = append(image.BigDataNames, key)
 			save = true
 		}
@@ -469,7 +539,7 @@ func (r *imageStore) Wipe() error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
 	}
-	ids := []string{}
+	ids := make([]string, 0, len(r.byid))
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
vendor/github.com/containers/storage/images_ffjson.go | 1148 (generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/containers/storage/layers.go | 11 (generated, vendored)

@@ -254,6 +254,7 @@ func (r *layerStore) Load() error {
 	compressedsums := make(map[digest.Digest][]string)
 	uncompressedsums := make(map[digest.Digest][]string)
 	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
+		idlist = make([]string, 0, len(layers))
 		for n, layer := range layers {
 			ids[layer.ID] = layers[n]
 			idlist = append(idlist, layer.ID)
@@ -305,6 +306,9 @@ func (r *layerStore) Load() error {
 	// actually delete.
 	if r.IsReadWrite() {
 		for _, layer := range r.layers {
+			if layer.Flags == nil {
+				layer.Flags = make(map[string]interface{})
+			}
 			if cleanup, ok := layer.Flags[incompleteFlag]; ok {
 				if b, ok := cleanup.(bool); ok && b {
 					err = r.Delete(layer.ID)
@@ -338,7 +342,7 @@ func (r *layerStore) Save() error {
 	if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
 		return err
 	}
-	mounts := []layerMountPoint{}
+	mounts := make([]layerMountPoint, 0, len(r.layers))
 	for _, layer := range r.layers {
 		if layer.MountPoint != "" && layer.MountCount > 0 {
 			mounts = append(mounts, layerMountPoint{
@@ -455,6 +459,9 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
 	if !ok {
 		return ErrLayerUnknown
 	}
+	if layer.Flags == nil {
+		layer.Flags = make(map[string]interface{})
+	}
 	layer.Flags[flag] = value
 	return r.Save()
 }
@@ -733,7 +740,7 @@ func (r *layerStore) Wipe() error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
 	}
-	ids := []string{}
+	ids := make([]string, 0, len(r.byid))
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
vendor/github.com/containers/storage/layers_ffjson.go | 1713 (generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97 (generated, vendored, new file)

@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	flDebug  = flag.Bool("D", false, "debugging output")
+	flNewDir = flag.String("newdir", "", "")
+	flOldDir = flag.String("olddir", "", "")
+	log      = logrus.New()
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "storage-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
vendor/github.com/containers/storage/store.go | 79 (generated, vendored)

@@ -20,7 +20,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
 
@@ -87,6 +87,10 @@ type ROBigDataStore interface {
 	// data associated with this ID, if it has previously been set.
 	BigDataSize(id, key string) (int64, error)
 
+	// BigDataDigest retrieves the digest of a (potentially large) piece of
+	// data associated with this ID, if it has previously been set.
+	BigDataDigest(id, key string) (digest.Digest, error)
+
 	// BigDataNames() returns a list of the names of previously-stored pieces of
 	// data.
 	BigDataNames(id string) ([]string, error)
@@ -327,6 +331,10 @@ type Store interface {
 	// of named data associated with an image.
 	ImageBigDataSize(id, key string) (int64, error)
 
+	// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
+	// of named data associated with an image.
+	ImageBigDataDigest(id, key string) (digest.Digest, error)
+
 	// SetImageBigData stores a (possibly large) chunk of named data associated
 	// with an image.
 	SetImageBigData(id, key string, data []byte) error
@@ -343,6 +351,10 @@ type Store interface {
 	// chunk of named data associated with a container.
 	ContainerBigDataSize(id, key string) (int64, error)
 
+	// ContainerBigDataDigest retrieves the digest of a (possibly large)
+	// chunk of named data associated with a container.
+	ContainerBigDataDigest(id, key string) (digest.Digest, error)
+
 	// SetContainerBigData stores a (possibly large) chunk of named data
 	// associated with a container.
 	SetContainerBigData(id, key string, data []byte) error
@@ -728,11 +740,14 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 	if err != nil {
 		return nil, -1, err
 	}
+	rlstores, err := s.ROLayerStores()
+	if err != nil {
+		return nil, -1, err
+	}
 	rcstore, err := s.ContainerStore()
 	if err != nil {
 		return nil, -1, err
 	}
-
 	rlstore.Lock()
 	defer rlstore.Unlock()
 	if modified, err := rlstore.Modified(); modified || err != nil {
@@ -747,9 +762,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 		id = stringid.GenerateRandomID()
 	}
 	if parent != "" {
-		if l, err := rlstore.Get(parent); err == nil && l != nil {
-			parent = l.ID
-		} else {
+		var ilayer *Layer
+		for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
+			if l, err := lstore.Get(parent); err == nil && l != nil {
+				ilayer = l
+				parent = ilayer.ID
+				break
+			}
+		}
+		if ilayer == nil {
 			return nil, -1, ErrLayerUnknown
 		}
 	}
 	containers, err := rcstore.Containers()
@@ -1026,6 +1047,30 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
 	return -1, ErrSizeUnknown
 }
 
+func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return "", err
+	}
+	stores, err := s.ROImageStores()
+	if err != nil {
+		return "", err
+	}
+	stores = append([]ROImageStore{ristore}, stores...)
+	for _, ristore := range stores {
+		ristore.Lock()
+		defer ristore.Unlock()
+		if modified, err := ristore.Modified(); modified || err != nil {
+			ristore.Load()
+		}
+		d, err := ristore.BigDataDigest(id, key)
+		if err == nil && d.Validate() == nil {
+			return d, nil
+		}
+	}
+	return "", ErrDigestUnknown
+}
+
 func (s *store) ImageBigData(id, key string) ([]byte, error) {
 	istore, err := s.ImageStore()
 	if err != nil {
@@ -1089,10 +1134,22 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		rcstore.Load()
 	}
-
 	return rcstore.BigDataSize(id, key)
 }
 
+func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
+	rcstore, err := s.ContainerStore()
+	if err != nil {
+		return "", err
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		rcstore.Load()
+	}
+	return rcstore.BigDataDigest(id, key)
+}
+
 func (s *store) ContainerBigData(id, key string) ([]byte, error) {
 	rcstore, err := s.ContainerStore()
 	if err != nil {
@@ -1103,7 +1160,6 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		rcstore.Load()
 	}
-
 	return rcstore.BigData(id, key)
 }
 
@@ -1117,7 +1173,6 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error {
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		rcstore.Load()
 	}
-
 	return rcstore.SetBigData(id, key, data)
 }
 
@@ -1841,10 +1896,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
 }
 
 func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+	if err := d.Validate(); err != nil {
+		return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
+	}
 	return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
 }
 
 func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+	if err := d.Validate(); err != nil {
+		return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
+	}
 	return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
 }
 
@@ -2238,7 +2299,7 @@ func makeBigDataBaseName(key string) string {
 }
 
 func stringSliceWithoutValue(slice []string, value string) []string {
-	modified := []string{}
+	modified := make([]string, 0, len(slice))
 	for _, v := range slice {
 		if v == value {
 			continue
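With the interface additions above, callers can ask the store for a content digest without reading the data themselves. A hedged usage sketch; the paths and image ID are placeholders, and "manifest" is just a commonly used big-data key:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.StoreOptions{
		RunRoot:   "/tmp/storage-run", // placeholder paths
		GraphRoot: "/tmp/storage-root",
	})
	if err != nil {
		panic(err)
	}
	// "manifest" is a conventional big-data key; the ID is a placeholder.
	d, err := store.ImageBigDataDigest("someImageID", "manifest")
	if err != nil {
		fmt.Println("digest unknown:", err)
		return
	}
	fmt.Println("manifest digest:", d) // e.g. sha256:...
}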
vendor/github.com/containers/storage/vendor.conf | 2 (generated, vendored)

@@ -3,7 +3,6 @@ github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
@@ -19,3 +18,4 @@ github.com/tchap/go-patricia v2.2.6
 github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
+github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac