cri-o/vendor/github.com/containers/storage/images.go

package storage
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
ImageDigestBigDataKey = "manifest"
)
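// An illustrative sketch of how this key is typically used ("store", "img",
// and "manifestBytes" are placeholder names, not defined in this file):
//
//	// Storing the manifest under ImageDigestBigDataKey records its digest,
//	// which SetBigData and Load feed into the store's digest-based index.
//	if err := store.SetBigData(img.ID, ImageDigestBigDataKey, manifestBytes); err != nil {
//		// handle the error
//	}
//	matches, _ := store.ByDigest(digest.Canonical.FromBytes(manifestBytes))
//	_ = matches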
// An Image is a reference to a layer and an associated metadata string.
type Image struct {
// ID is either one which was specified at create-time, or a random
// value which was generated by the library.
ID string `json:"id"`
// Digest is a digest value that we can use to locate the image.
Digest digest.Digest `json:"digest,omitempty"`
// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
Names []string `json:"names,omitempty"`
// TopLayer is the ID of the topmost layer of the image itself, if the
// image contains one or more layers. Multiple images can refer to the
// same top layer.
TopLayer string `json:"layer,omitempty"`
// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
Metadata string `json:"metadata,omitempty"`
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
// memory when being read from or written to disk.
BigDataNames []string `json:"big-data-names,omitempty"`
// BigDataSizes maps the names in BigDataNames to the sizes of the data
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this image was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
}
// ROImageStore provides bookkeeping for information about Images.
type ROImageStore interface {
ROFileBasedStore
ROMetadataStore
ROBigDataStore
// Exists checks if there is an image with the given ID or name.
Exists(id string) bool
// Get retrieves information about an image given an ID or name.
Get(id string) (*Image, error)
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// Images returns a slice enumerating the known images.
Images() ([]Image, error)
// ByDigest returns a slice enumerating the images whose recorded digest
// matches the specified value, whether that digest comes from the image's
// Digest field or from its ImageDigestBigDataKey big data item.
ByDigest(d digest.Digest) ([]*Image, error)
}
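// An illustrative read-only usage sketch for the interface above (the
// directory path is a placeholder, and it is assumed that the embedded
// ROFileBasedStore exposes the lockfile methods implemented at the bottom
// of this file):
//
//	rostore, err := newROImageStore("/var/lib/containers/storage/overlay-images")
//	if err != nil {
//		// handle the error
//	}
//	rostore.Lock()
//	defer rostore.Unlock()
//	if rostore.Exists("docker.io/library/busybox:latest") {
//		img, _ := rostore.Get("docker.io/library/busybox:latest")
//		_ = img.TopLayer
//	}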
// ImageStore provides bookkeeping for information about Images.
type ImageStore interface {
ROImageStore
RWFileBasedStore
RWMetadataStore
RWBigDataStore
FlaggableStore
// Create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
SetNames(id string, names []string) error
// Delete removes the record of the image.
Delete(id string) error
// Wipe removes records of all images.
Wipe() error
}
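// An illustrative read-write usage sketch for the interface above
// (topLayerID and manifestDigest are placeholder values obtained elsewhere,
// and the same locking assumption as above applies):
//
//	store, err := newImageStore("/var/lib/containers/storage/overlay-images")
//	if err != nil {
//		// handle the error
//	}
//	store.Lock()
//	defer store.Unlock()
//	img, err := store.Create("", []string{"docker.io/library/busybox:latest"}, topLayerID, "", time.Time{}, manifestDigest)
//	if err == nil {
//		err = store.SetNames(img.ID, append(img.Names, "localhost/busybox:copy"))
//	}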
// imageStore is the package-private implementation of both ROImageStore and ImageStore.
type imageStore struct {
lockfile Locker
dir string
images []*Image
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
bydigest map[digest.Digest][]*Image
}
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
images[i] = *(r.images[i])
}
return images, nil
}
func (r *imageStore) imagespath() string {
return filepath.Join(r.dir, "images.json")
}
func (r *imageStore) datadir(id string) string {
return filepath.Join(r.dir, id)
}
func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
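// Load reads images.json if it exists and rebuilds the in-memory ID, name,
// and digest indexes. Duplicate names are resolved in favor of the image
// loaded later; if any names had to be dropped, the file is re-saved, which
// fails with ErrDuplicateImageNames on a read-only store.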
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
images := []*Image{}
idlist := []string{}
ids := make(map[string]*Image)
names := make(map[string]*Image)
digests := make(map[digest.Digest][]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(images))
for n, image := range images {
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
for _, name := range image.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest: index the image under the digest of its manifest big
// data item, if one has been recorded.
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
}
// Explicit digest: fall back to the manifest digest if no digest was
// recorded, and index the image under its recorded digest when that
// differs from the implicit one.
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
}
}
}
if shouldSave && !r.IsReadWrite() {
return ErrDuplicateImageNames
}
r.images = images
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
r.bydigest = digests
if shouldSave {
return r.Save()
}
return nil
}
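// Save atomically rewrites images.json from the in-memory records and marks
// the store's lockfile as touched; it refuses to run on a read-only store.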
func (r *imageStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
jdata, err := json.Marshal(&r.images)
if err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(rpath, jdata, 0600)
}
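// newImageStore creates dir if necessary, acquires the read-write
// images.lock lockfile, and loads the store's contents into memory.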
func newImageStore(dir string) (ImageStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
lockfile, err := GetLockfile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
lockfile.Lock()
defer lockfile.Unlock()
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
bydigest: make(map[digest.Digest][]*Image),
}
if err := istore.Load(); err != nil {
return nil, err
}
return &istore, nil
}
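// newROImageStore opens the image store at dir read-only: it acquires a
// read-only lockfile and loads the store without creating the directory.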
func newROImageStore(dir string) (ROImageStore, error) {
lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
lockfile.Lock()
defer lockfile.Unlock()
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
bydigest: make(map[digest.Digest][]*Image),
}
if err := istore.Load(); err != nil {
return nil, err
}
return &istore, nil
}
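// lookup resolves id first as an exact image ID, then as a name, and finally
// as an unambiguous ID prefix via the truncated-ID index.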
func (r *imageStore) lookup(id string) (*Image, bool) {
if image, ok := r.byid[id]; ok {
return image, ok
} else if image, ok := r.byname[id]; ok {
return image, ok
} else if longid, err := r.idindex.Get(id); err == nil {
image, ok := r.byid[longid]
return image, ok
}
return nil, false
}
func (r *imageStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
delete(image.Flags, flag)
return r.Save()
}
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
}
image.Flags[flag] = value
return r.Save()
}
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.IsReadWrite() {
return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
}
if id == "" {
id = stringid.GenerateRandomID()
_, idInUse := r.byid[id]
for idInUse {
id = stringid.GenerateRandomID()
_, idInUse = r.byid[id]
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
}
}
if created.IsZero() {
created = time.Now().UTC()
}
if err == nil {
image = &Image{
ID: id,
Digest: searchableDigest,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
err = r.Save()
}
return image, err
}
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
}
return "", ErrImageUnknown
}
func (r *imageStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
}
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
}
return ErrImageUnknown
}
func (r *imageStore) removeName(image *Image, name string) {
image.Names = stringSliceWithoutValue(image.Names, name)
}
func (r *imageStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
}
names = dedupeNames(names)
if image, ok := r.lookup(id); ok {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, name := range names {
if otherImage, ok := r.byname[name]; ok {
r.removeName(otherImage, name)
}
r.byname[name] = image
}
image.Names = names
return r.Save()
}
return ErrImageUnknown
}
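// Delete removes the image's record from the in-memory indexes and from
// images.json, then removes its big-data directory from disk.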
func (r *imageStore) Delete(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
id = image.ID
toDeleteIndex := -1
for i, candidate := range r.images {
if candidate.ID == id {
toDeleteIndex = i
}
}
delete(r.byid, id)
r.idindex.Delete(id)
for _, name := range image.Names {
delete(r.byname, name)
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
r.images = r.images[:len(r.images)-1]
} else {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
}
return nil
}
func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return image, nil
}
return nil, ErrImageUnknown
}
func (r *imageStore) Lookup(name string) (id string, err error) {
if image, ok := r.lookup(name); ok {
return image.ID, nil
}
return "", ErrImageUnknown
}
func (r *imageStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return images, nil
}
return nil, ErrImageUnknown
}
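// BigData reads the named data item for an image from its data directory.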
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
}
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
}
return ioutil.ReadFile(r.datapath(image.ID, key))
}
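// BigDataSize returns the cached size of the named data item; if the size is
// not cached, it re-reads the item and re-stores it via SetBigData so the
// cache is repopulated.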
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
}
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
}
}
return -1, ErrSizeUnknown
}
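// BigDataDigest returns the cached digest of the named data item, using the
// same read-and-restore fallback as BigDataSize when the digest is not cached.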
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
}
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown
}
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
}
return image.BigDataNames, nil
}
func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
modified := make([]*Image, 0, len(slice))
for _, v := range slice {
if v == value {
continue
}
modified = append(modified, v)
}
return modified
}
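// SetBigData atomically writes the named data item into the image's data
// directory, updates the cached size and digest, and, when the item is
// ImageDigestBigDataKey, keeps the digest-based index in sync.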
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
}
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := image.BigDataSizes[key]
image.BigDataSizes[key] = int64(len(data))
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range image.BigDataNames {
if name == key {
addName = false
break
}
}
if addName {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
}
}
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
if len(list) == len(imageSliceWithoutValue(list, image)) {
// the list isn't shortened by trying to prune this image from it,
// so it's not in there yet
r.bydigest[newDigest] = append(list, image)
}
}
if save {
err = r.Save()
}
}
return err
}
func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
}
}
return nil
}
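// The methods below delegate locking and modification bookkeeping to the
// store's lockfile.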
func (r *imageStore) Lock() {
r.lockfile.Lock()
}
func (r *imageStore) Unlock() {
r.lockfile.Unlock()
}
func (r *imageStore) Touch() error {
return r.lockfile.Touch()
}
func (r *imageStore) Modified() (bool, error) {
return r.lockfile.Modified()
}
func (r *imageStore) IsReadWrite() bool {
return r.lockfile.IsReadWrite()
}
func (r *imageStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}