Vendor in latest from containers/storage

Container storage now supports a configuration file as well as alternate
image stores. This means we can start to take advantage of network-based
image stores for running containers. (A short sketch of the new driver
hook follows the change summary below.)

Signed-off-by: Dan Walsh <dwalsh@redhat.com>
Dan Walsh 2017-06-08 07:44:52 -04:00
parent 4d46ae6e1e
commit 23f20f1e5b
16 changed files with 1286 additions and 733 deletions
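
The mechanism this vendor bump pulls in is visible in the overlay driver changes below: a new "overlay.imagestore" option is parsed into a list of absolute, read-only store directories, and every graph driver gains an AdditionalImageStores() []string method. The following is a minimal sketch of how a caller might walk those stores to resolve a layer path; exampleDriver, resolve, and fileExists are hypothetical stand-ins, but the lookup order mirrors the dir() helper added to the overlay driver in this commit:

    package main

    import (
        "fmt"
        "os"
        "path"
    )

    // graphDriver mirrors the method added to ProtoDriver in this commit:
    // drivers report any extra read-only image stores they were configured with.
    type graphDriver interface {
        AdditionalImageStores() []string
    }

    // exampleDriver is a hypothetical stand-in for the overlay driver, holding
    // stores parsed from an option such as "overlay.imagestore=/usr/lib/ro-store".
    type exampleDriver struct {
        name   string // driver name, e.g. "overlay"
        home   string // primary store, e.g. "/var/lib/storage/overlay"
        stores []string
    }

    func (d exampleDriver) AdditionalImageStores() []string { return d.stores }

    // resolve checks the primary store first, then falls back to each
    // additional image store under <store>/<driver-name>/<id>, the way the
    // overlay driver's dir() helper in the diff below does.
    func (d exampleDriver) resolve(id string) string {
        primary := path.Join(d.home, id)
        if _, err := os.Stat(primary); err != nil {
            for _, p := range d.AdditionalImageStores() {
                if l := path.Join(p, d.name, id); fileExists(l) {
                    return l
                }
            }
        }
        return primary
    }

    func fileExists(p string) bool {
        _, err := os.Stat(p)
        return err == nil
    }

    func main() {
        d := exampleDriver{
            name:   "overlay",
            home:   "/var/lib/storage/overlay",
            stores: []string{"/usr/lib/ro-store"},
        }
        fmt.Println(d.resolve("abcdef"))
    }
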


@@ -6,7 +6,7 @@ k8s.io/apiserver release-1.6 https://github.com/kubernetes/apiserver
 #
 github.com/Sirupsen/logrus v0.11.5
 github.com/containers/image b36d6535410088370aaaee7ec8522863b5e43489
-github.com/containers/storage 2c75d14b978bff468e7d5ec3ff8a003eca443209
+github.com/containers/storage 74bc9c18a31d0e6fb1a11ce4563159b944a73d2e
 github.com/containernetworking/cni v0.4.0
 google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go
 github.com/opencontainers/selinux v1.0.0-rc1


@@ -50,6 +50,12 @@ type Container struct {
     // that has been stored, if they're known.
     BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`

+    // Created is the datestamp for when this container was created. Older
+    // versions of the library did not track this information, so callers
+    // will likely want to use the IsZero() method to verify that a value
+    // is set before using it.
+    Created time.Time `json:"created,omitempty"`
+
     Flags map[string]interface{} `json:"flags,omitempty"`
 }
@@ -93,7 +99,7 @@ type ContainerStore interface {
 type containerStore struct {
     lockfile   Locker
     dir        string
-    containers []Container
+    containers []*Container
     idindex    *truncindex.TruncIndex
     byid       map[string]*Container
     bylayer    map[string]*Container
@@ -101,7 +107,11 @@ type containerStore struct {
 }

 func (r *containerStore) Containers() ([]Container, error) {
-    return r.containers, nil
+    containers := make([]Container, len(r.containers))
+    for i := range r.containers {
+        containers[i] = *(r.containers[i])
+    }
+    return containers, nil
 }

 func (r *containerStore) containerspath() string {
@@ -123,7 +133,7 @@ func (r *containerStore) Load() error {
     if err != nil && !os.IsNotExist(err) {
         return err
     }
-    containers := []Container{}
+    containers := []*Container{}
     layers := make(map[string]*Container)
     idlist := []string{}
     ids := make(map[string]*Container)
@@ -131,14 +141,14 @@ func (r *containerStore) Load() error {
     if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
         for n, container := range containers {
             idlist = append(idlist, container.ID)
-            ids[container.ID] = &containers[n]
-            layers[container.LayerID] = &containers[n]
+            ids[container.ID] = containers[n]
+            layers[container.LayerID] = containers[n]
             for _, name := range container.Names {
                 if conflict, ok := names[name]; ok {
                     r.removeName(conflict, name)
                     needSave = true
                 }
-                names[name] = &containers[n]
+                names[name] = containers[n]
             }
         }
     }
@@ -148,7 +158,6 @@ func (r *containerStore) Load() error {
     r.bylayer = layers
     r.byname = names
     if needSave {
-        r.Touch()
         return r.Save()
     }
     return nil
@@ -163,6 +172,7 @@ func (r *containerStore) Save() error {
     if err != nil {
         return err
     }
+    defer r.Touch()
     return ioutils.AtomicWriteFile(rpath, jdata, 0600)
 }
@@ -179,7 +189,7 @@ func newContainerStore(dir string) (ContainerStore, error) {
     cstore := containerStore{
         lockfile:   lockfile,
         dir:        dir,
-        containers: []Container{},
+        containers: []*Container{},
         byid:       make(map[string]*Container),
         bylayer:    make(map[string]*Container),
         byname:     make(map[string]*Container),
@@ -241,7 +251,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
         }
     }
     if err == nil {
-        newContainer := Container{
+        container = &Container{
             ID:           id,
             Names:        names,
             ImageID:      image,
@@ -249,10 +259,10 @@
             Metadata:     metadata,
             BigDataNames: []string{},
             BigDataSizes: make(map[string]int64),
+            Created:      time.Now().UTC(),
             Flags:        make(map[string]interface{}),
         }
-        r.containers = append(r.containers, newContainer)
-        container = &r.containers[len(r.containers)-1]
+        r.containers = append(r.containers, container)
         r.byid[id] = container
         r.idindex.Add(id)
         r.bylayer[layer] = container
@@ -306,10 +316,11 @@ func (r *containerStore) Delete(id string) error {
         return ErrContainerUnknown
     }
     id = container.ID
-    newContainers := []Container{}
-    for _, candidate := range r.containers {
-        if candidate.ID != id {
-            newContainers = append(newContainers, candidate)
+    toDeleteIndex := -1
+    for i, candidate := range r.containers {
+        if candidate.ID == id {
+            toDeleteIndex = i
+            break
         }
     }
     delete(r.byid, id)
@@ -318,7 +329,14 @@
     for _, name := range container.Names {
         delete(r.byname, name)
     }
-    r.containers = newContainers
+    if toDeleteIndex != -1 {
+        // delete the container at toDeleteIndex
+        if toDeleteIndex == len(r.containers)-1 {
+            r.containers = r.containers[:len(r.containers)-1]
+        } else {
+            r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
+        }
+    }
     if err := r.Save(); err != nil {
         return err
     }
@@ -437,6 +455,10 @@ func (r *containerStore) Modified() (bool, error) {
     return r.lockfile.Modified()
 }

+func (r *containerStore) IsReadWrite() bool {
+    return r.lockfile.IsReadWrite()
+}
+
 func (r *containerStore) TouchedSince(when time.Time) bool {
     return r.lockfile.TouchedSince(when)
 }


@@ -372,6 +372,12 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
     })
 }

+// AdditionalImageStores returns additional image stores supported by the driver
+func (a *Driver) AdditionalImageStores() []string {
+    var imageStores []string
+    return imageStores
+}
+
 type fileGetNilCloser struct {
     storage.FileGetter
 }


@@ -518,3 +518,9 @@ func (d *Driver) Exists(id string) bool {
     _, err := os.Stat(dir)
     return err == nil
 }
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+    var imageStores []string
+    return imageStores
+}


@@ -28,7 +28,6 @@ import (
     "github.com/containers/storage/pkg/loopback"
     "github.com/containers/storage/pkg/mount"
    "github.com/containers/storage/pkg/parsers"
-    "github.com/containers/storage/storageversion"
     "github.com/docker/go-units"
     "github.com/opencontainers/selinux/go-selinux/label"
@@ -1668,17 +1667,17 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
     }

     // https://github.com/docker/docker/issues/4036
-    if supported := devicemapper.UdevSetSyncSupport(true); !supported {
-        if storageversion.IAmStatic == "true" {
-            logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
-        } else {
-            logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
-        }
-
-        if !devices.overrideUdevSyncCheck {
-            return graphdriver.ErrNotSupported
-        }
-    }
+    // if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+    //     if storageversion.IAmStatic == "true" {
+    //         logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+    //     } else {
+    //         logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+    //     }
+    //
+    //     if !devices.overrideUdevSyncCheck {
+    //         return graphdriver.ErrNotSupported
+    //     }
+    // }

     //create the root dir of the devmapper driver ownership to match this
     //daemon's remapped root uid/gid so containers can start properly


@@ -224,3 +224,9 @@ func (d *Driver) Put(id string) error {
 func (d *Driver) Exists(id string) bool {
     return d.DeviceSet.HasDevice(id)
 }
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+    var imageStores []string
+    return imageStores
+}


@@ -74,6 +74,8 @@ type ProtoDriver interface {
     // held by the driver, e.g., unmounting all layered filesystems
     // known to this driver.
     Cleanup() error
+    // AdditionalImageStores returns additional image stores supported by the driver
+    AdditionalImageStores() []string
 }

 // Driver is the interface for layered/snapshot file system drivers.


@@ -10,6 +10,7 @@ import (
     "os"
     "os/exec"
     "path"
+    "path/filepath"
     "strconv"
     "strings"
     "syscall"
@@ -82,6 +83,7 @@ type Driver struct {
     uidMaps []idtools.IDMap
     gidMaps []idtools.IDMap
     ctr     *graphdriver.RefCounter
+    opts    *overlayOptions
 }

 var backingFs = "<unknown>"
@@ -149,6 +151,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
         uidMaps: uidMaps,
         gidMaps: gidMaps,
         ctr:     graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+        opts:    opts,
     }

     return d, nil
@@ -170,6 +173,7 @@ func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.ID
 type overlayOptions struct {
     overrideKernelCheck bool
+    imageStores         []string
 }

 func parseOptions(options []string) (*overlayOptions, error) {
@@ -186,6 +190,22 @@ func parseOptions(options []string) (*overlayOptions, error) {
             if err != nil {
                 return nil, err
             }
+        case "overlay.imagestore":
+            // Additional read only image stores to use for lower paths
+            for _, store := range strings.Split(val, ",") {
+                store = filepath.Clean(store)
+                if !filepath.IsAbs(store) {
+                    return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store)
+                }
+                st, err := os.Stat(store)
+                if err != nil {
+                    return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err)
+                }
+                if !st.IsDir() {
+                    return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
+                }
+                o.imageStores = append(o.imageStores, store)
+            }
         default:
             return nil, fmt.Errorf("overlay: Unknown option %s", key)
         }
@@ -357,8 +377,18 @@ func (d *Driver) getLower(parent string) (string, error) {
     return strings.Join(lowers, ":"), nil
 }

-func (d *Driver) dir(id string) string {
-    return path.Join(d.home, id)
+func (d *Driver) dir(val string) string {
+    newpath := path.Join(d.home, val)
+    if _, err := os.Stat(newpath); err != nil {
+        for _, p := range d.AdditionalImageStores() {
+            l := path.Join(p, d.name, val)
+            _, err = os.Stat(l)
+            if err == nil {
+                return l
+            }
+        }
+    }
+    return newpath
 }

 func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -366,11 +396,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
     lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
     if err == nil {
         for _, s := range strings.Split(string(lowers), ":") {
-            lp, err := os.Readlink(path.Join(d.home, s))
+            lower := d.dir(s)
+            lp, err := os.Readlink(lower)
             if err != nil {
                 return nil, err
             }
-            lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
+            lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp))))
         }
     } else if !os.IsNotExist(err) {
         return nil, err
@@ -411,6 +442,31 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
         return "", err
     }

+    newlowers := ""
+    for _, l := range strings.Split(string(lowers), ":") {
+        lower := ""
+        newpath := path.Join(d.home, l)
+        if _, err := os.Stat(newpath); err != nil {
+            for _, p := range d.AdditionalImageStores() {
+                lower = path.Join(p, d.name, l)
+                if _, err2 := os.Stat(lower); err2 == nil {
+                    break
+                }
+                lower = ""
+            }
+            if lower == "" {
+                return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
+            }
+        } else {
+            lower = l
+        }
+        if newlowers == "" {
+            newlowers = lower
+        } else {
+            newlowers = newlowers + ":" + lower
+        }
+    }
     mergedDir := path.Join(dir, "merged")
     if count := d.ctr.Increment(mergedDir); count > 1 {
         return mergedDir, nil
@@ -424,7 +480,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
     }()

     workDir := path.Join(dir, "work")
-    opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+    opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work"))
     mountLabel = label.FormatMountLabel(opts, mountLabel)
     if len(mountLabel) > syscall.Getpagesize() {
         return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
@@ -527,3 +583,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
     return archive.OverlayChanges(layers, diffPath)
 }
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+    return d.opts.imageStores
+}


@@ -143,3 +143,9 @@ func (d *Driver) Exists(id string) bool {
     _, err := os.Stat(d.dir(id))
     return err == nil
 }
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+    var imageStores []string
+    return imageStores
+}


@@ -403,3 +403,9 @@ func (d *Driver) Exists(id string) bool {
     defer d.Unlock()
     return d.filesystemsCache[d.zfsPath(id)] == true
 }
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+    var imageStores []string
+    return imageStores
+}


@@ -2,7 +2,6 @@ package storage

 import (
     "encoding/json"
-    "errors"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -11,6 +10,7 @@
     "github.com/containers/storage/pkg/ioutils"
     "github.com/containers/storage/pkg/stringid"
     "github.com/containers/storage/pkg/truncindex"
+    "github.com/pkg/errors"
 )

 var (
@@ -46,24 +46,20 @@ type Image struct {
     // that has been stored, if they're known.
     BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`

+    // Created is the datestamp for when this image was created. Older
+    // versions of the library did not track this information, so callers
+    // will likely want to use the IsZero() method to verify that a value
+    // is set before using it.
+    Created time.Time `json:"created,omitempty"`
+
     Flags map[string]interface{} `json:"flags,omitempty"`
 }

-// ImageStore provides bookkeeping for information about Images.
-type ImageStore interface {
-    FileBasedStore
-    MetadataStore
-    BigDataStore
-    FlaggableStore
-
-    // Create creates an image that has a specified ID (or a random one) and
-    // optional names, using the specified layer as its topmost (hopefully
-    // read-only) layer.  That layer can be referenced by multiple images.
-    Create(id string, names []string, layer, metadata string) (*Image, error)
-
-    // SetNames replaces the list of names associated with an image with the
-    // supplied values.
-    SetNames(id string, names []string) error
+// ROImageStore provides bookkeeping for information about Images.
+type ROImageStore interface {
+    ROFileBasedStore
+    ROMetadataStore
+    ROBigDataStore

     // Exists checks if there is an image with the given ID or name.
     Exists(id string) bool
@@ -71,12 +67,6 @@ type ImageStore interface {
     // Get retrieves information about an image given an ID or name.
     Get(id string) (*Image, error)

-    // Delete removes the record of the image.
-    Delete(id string) error
-
-    // Wipe removes records of all images.
-    Wipe() error
-
     // Lookup attempts to translate a name to an ID.  Most methods do this
     // implicitly.
     Lookup(name string) (string, error)
@@ -85,17 +75,45 @@ type ImageStore interface {
     Images() ([]Image, error)
 }

+// ImageStore provides bookkeeping for information about Images.
+type ImageStore interface {
+    ROImageStore
+    RWFileBasedStore
+    RWMetadataStore
+    RWBigDataStore
+    FlaggableStore
+
+    // Create creates an image that has a specified ID (or a random one) and
+    // optional names, using the specified layer as its topmost (hopefully
+    // read-only) layer.  That layer can be referenced by multiple images.
+    Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
+
+    // SetNames replaces the list of names associated with an image with the
+    // supplied values.
+    SetNames(id string, names []string) error
+
+    // Delete removes the record of the image.
+    Delete(id string) error
+
+    // Wipe removes records of all images.
+    Wipe() error
+}
+
 type imageStore struct {
     lockfile Locker
     dir      string
-    images   []Image
+    images   []*Image
     idindex  *truncindex.TruncIndex
     byid     map[string]*Image
     byname   map[string]*Image
 }

 func (r *imageStore) Images() ([]Image, error) {
-    return r.images, nil
+    images := make([]Image, len(r.images))
+    for i := range r.images {
+        images[i] = *(r.images[i])
+    }
+    return images, nil
 }

 func (r *imageStore) imagespath() string {
@@ -111,41 +129,46 @@ func (r *imageStore) datapath(id, key string) string {
 }

 func (r *imageStore) Load() error {
-    needSave := false
+    shouldSave := false
     rpath := r.imagespath()
     data, err := ioutil.ReadFile(rpath)
     if err != nil && !os.IsNotExist(err) {
         return err
     }
-    images := []Image{}
+    images := []*Image{}
     idlist := []string{}
     ids := make(map[string]*Image)
     names := make(map[string]*Image)
     if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
         for n, image := range images {
-            ids[image.ID] = &images[n]
+            ids[image.ID] = images[n]
             idlist = append(idlist, image.ID)
             for _, name := range image.Names {
                 if conflict, ok := names[name]; ok {
                     r.removeName(conflict, name)
-                    needSave = true
+                    shouldSave = true
                 }
-                names[name] = &images[n]
+                names[name] = images[n]
             }
         }
     }
+    if shouldSave && !r.IsReadWrite() {
+        return errors.New("image store assigns the same name to multiple images")
+    }
     r.images = images
     r.idindex = truncindex.NewTruncIndex(idlist)
     r.byid = ids
     r.byname = names
-    if needSave {
-        r.Touch()
+    if shouldSave {
         return r.Save()
     }
     return nil
 }

 func (r *imageStore) Save() error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
+    }
     rpath := r.imagespath()
     if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
         return err
@@ -154,6 +177,7 @@ func (r *imageStore) Save() error {
     if err != nil {
         return err
     }
+    defer r.Touch()
     return ioutils.AtomicWriteFile(rpath, jdata, 0600)
 }
@@ -170,7 +194,27 @@ func newImageStore(dir string) (ImageStore, error) {
     istore := imageStore{
         lockfile: lockfile,
         dir:      dir,
-        images:   []Image{},
+        images:   []*Image{},
+        byid:     make(map[string]*Image),
+        byname:   make(map[string]*Image),
+    }
+    if err := istore.Load(); err != nil {
+        return nil, err
+    }
+    return &istore, nil
+}
+
+func newROImageStore(dir string) (ROImageStore, error) {
+    lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
+    if err != nil {
+        return nil, err
+    }
+    lockfile.Lock()
+    defer lockfile.Unlock()
+    istore := imageStore{
+        lockfile: lockfile,
+        dir:      dir,
+        images:   []*Image{},
         byid:     make(map[string]*Image),
         byname:   make(map[string]*Image),
     }
@@ -193,6 +237,9 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
 }

 func (r *imageStore) ClearFlag(id string, flag string) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
+    }
     image, ok := r.lookup(id)
     if !ok {
         return ErrImageUnknown
@@ -202,6 +249,9 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
 }

 func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
+    }
     image, ok := r.lookup(id)
     if !ok {
         return ErrImageUnknown
@@ -210,7 +260,10 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
     return r.Save()
 }

-func (r *imageStore) Create(id string, names []string, layer, metadata string) (image *Image, err error) {
+func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
+    if !r.IsReadWrite() {
+        return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
+    }
     if id == "" {
         id = stringid.GenerateRandomID()
         _, idInUse := r.byid[id]
@@ -227,18 +280,21 @@
             return nil, ErrDuplicateName
         }
     }
+    if created.IsZero() {
+        created = time.Now().UTC()
+    }
     if err == nil {
-        newImage := Image{
+        image = &Image{
             ID:           id,
             Names:        names,
             TopLayer:     layer,
             Metadata:     metadata,
             BigDataNames: []string{},
             BigDataSizes: make(map[string]int64),
+            Created:      created,
             Flags:        make(map[string]interface{}),
         }
-        r.images = append(r.images, newImage)
-        image = &r.images[len(r.images)-1]
+        r.images = append(r.images, image)
         r.idindex.Add(id)
         r.byid[id] = image
         for _, name := range names {
@@ -257,6 +313,9 @@ func (r *imageStore) Metadata(id string) (string, error) {
 }

 func (r *imageStore) SetMetadata(id, metadata string) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
+    }
     if image, ok := r.lookup(id); ok {
         image.Metadata = metadata
         return r.Save()
@@ -269,6 +328,9 @@ func (r *imageStore) removeName(image *Image, name string) {
 }

 func (r *imageStore) SetNames(id string, names []string) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
+    }
     if image, ok := r.lookup(id); ok {
         for _, name := range image.Names {
             delete(r.byname, name)
@@ -286,15 +348,18 @@
 }

 func (r *imageStore) Delete(id string) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+    }
     image, ok := r.lookup(id)
     if !ok {
         return ErrImageUnknown
     }
     id = image.ID
-    newImages := []Image{}
-    for _, candidate := range r.images {
-        if candidate.ID != id {
-            newImages = append(newImages, candidate)
+    toDeleteIndex := -1
+    for i, candidate := range r.images {
+        if candidate.ID == id {
+            toDeleteIndex = i
         }
     }
     delete(r.byid, id)
@@ -302,7 +367,14 @@
     for _, name := range image.Names {
         delete(r.byname, name)
     }
-    r.images = newImages
+    if toDeleteIndex != -1 {
+        // delete the image at toDeleteIndex
+        if toDeleteIndex == len(r.images)-1 {
+            r.images = r.images[:len(r.images)-1]
+        } else {
+            r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
+        }
+    }
     if err := r.Save(); err != nil {
         return err
     }
@@ -359,6 +431,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
 }

 func (r *imageStore) SetBigData(id, key string, data []byte) error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+    }
     image, ok := r.lookup(id)
     if !ok {
         return ErrImageUnknown
@@ -393,6 +468,9 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
 }

 func (r *imageStore) Wipe() error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+    }
     ids := []string{}
     for id := range r.byid {
         ids = append(ids, id)
@@ -421,6 +499,10 @@ func (r *imageStore) Modified() (bool, error) {
     return r.lockfile.Modified()
 }

+func (r *imageStore) IsReadWrite() bool {
+    return r.lockfile.IsReadWrite()
+}
+
 func (r *imageStore) TouchedSince(when time.Time) bool {
     return r.lockfile.TouchedSince(when)
 }


@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"encoding/json" "encoding/json"
"errors"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -16,6 +15,8 @@ import (
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex" "github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage" "github.com/vbatts/tar-split/tar/storage"
) )
@ -66,6 +67,38 @@ type Layer struct {
// mounted at the mount point. // mounted at the mount point.
MountCount int `json:"-"` MountCount int `json:"-"`
// Created is the datestamp for when this layer was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an
// uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. Often referred to
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
// that was last passed to ApplyDiff() or Put().
CompressionType archive.Compression `json:"compression,omitempty"`
// Flags is arbitrary data about the layer.
Flags map[string]interface{} `json:"flags,omitempty"` Flags map[string]interface{} `json:"flags,omitempty"`
} }
@ -75,12 +108,74 @@ type layerMountPoint struct {
MountCount int `json:"count"` MountCount int `json:"count"`
} }
// DiffOptions override the default behavior of Diff() methods.
type DiffOptions struct {
// Compression, if set overrides the default compressor when generating a diff.
Compression *archive.Compression
}
// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
type ROLayerStore interface {
ROFileBasedStore
ROMetadataStore
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
// Get retrieves information about a layer given an ID or name.
Get(id string) (*Layer, error)
// Status returns an slice of key-value pairs, suitable for human consumption,
// relaying whatever status information the underlying driver can share.
Status() ([][2]string, error)
// Changes returns a slice of Change structures, which contain a pathname
// (Path) and a description of what sort of change (Kind) was made by the
// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
// specified layer. By default, the layer's parent is used as a reference.
Changes(from, to string) ([]archive.Change, error)
// Diff produces a tarstream which can be applied to a layer with the contents
// of the first layer to produce a layer with the contents of the second layer.
// By default, the parent of the second layer is used as the first
// layer, so it need not be specified. Options can be used to override
// default behavior, but are also not required.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// DiffSize produces an estimate of the length of the tarstream which would be
// produced by Diff.
DiffSize(from, to string) (int64, error)
// Size produces a cached value for the uncompressed size of the layer,
// if one is known, or -1 if it is not known. If the layer can not be
// found, it returns an error.
Size(name string) (int64, error)
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
// LayerStore wraps a graph driver, adding the ability to refer to layers by // LayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of // name, and keeping track of parent-child relationships, along with a list of
// all known layers. // all known layers.
type LayerStore interface { type LayerStore interface {
FileBasedStore ROLayerStore
MetadataStore RWFileBasedStore
RWMetadataStore
FlaggableStore FlaggableStore
// Create creates a new layer, optionally giving it a specified ID rather than // Create creates a new layer, optionally giving it a specified ID rather than
@ -98,20 +193,10 @@ type LayerStore interface {
// Put combines the functions of CreateWithFlags and ApplyDiff. // Put combines the functions of CreateWithFlags and ApplyDiff.
Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
// Get retrieves information about a layer given an ID or name.
Get(id string) (*Layer, error)
// SetNames replaces the list of names associated with a layer with the // SetNames replaces the list of names associated with a layer with the
// supplied values. // supplied values.
SetNames(id string, names []string) error SetNames(id string, names []string) error
// Status returns an slice of key-value pairs, suitable for human consumption,
// relaying whatever status information the underlying driver can share.
Status() ([][2]string, error)
// Delete deletes a layer with the specified name or ID. // Delete deletes a layer with the specified name or ID.
Delete(id string) error Delete(id string) error
@ -126,32 +211,9 @@ type LayerStore interface {
// Unmount unmounts a layer when it is no longer in use. // Unmount unmounts a layer when it is no longer in use.
Unmount(id string) error Unmount(id string) error
// Changes returns a slice of Change structures, which contain a pathname
// (Path) and a description of what sort of change (Kind) was made by the
// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
// specified layer. By default, the layer's parent is used as a reference.
Changes(from, to string) ([]archive.Change, error)
// Diff produces a tarstream which can be applied to a layer with the contents
// of the first layer to produce a layer with the contents of the second layer.
// By default, the parent of the second layer is used as the first
// layer, so it need not be specified.
Diff(from, to string) (io.ReadCloser, error)
// DiffSize produces an estimate of the length of the tarstream which would be
// produced by Diff.
DiffSize(from, to string) (int64, error)
// ApplyDiff reads a tarstream which was created by a previous call to Diff and // ApplyDiff reads a tarstream which was created by a previous call to Diff and
// applies its changes to a specified layer. // applies its changes to a specified layer.
ApplyDiff(to string, diff archive.Reader) (int64, error) ApplyDiff(to string, diff archive.Reader) (int64, error)
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
} }
type layerStore struct { type layerStore struct {
@ -159,15 +221,21 @@ type layerStore struct {
rundir string rundir string
driver drivers.Driver driver drivers.Driver
layerdir string layerdir string
layers []Layer layers []*Layer
idindex *truncindex.TruncIndex idindex *truncindex.TruncIndex
byid map[string]*Layer byid map[string]*Layer
byname map[string]*Layer byname map[string]*Layer
bymount map[string]*Layer bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
} }
func (r *layerStore) Layers() ([]Layer, error) { func (r *layerStore) Layers() ([]Layer, error) {
return r.layers, nil layers := make([]Layer, len(r.layers))
for i := range r.layers {
layers[i] = *(r.layers[i])
}
return layers, nil
} }
func (r *layerStore) mountspath() string { func (r *layerStore) mountspath() string {
@ -179,36 +247,41 @@ func (r *layerStore) layerspath() string {
} }
func (r *layerStore) Load() error { func (r *layerStore) Load() error {
needSave := false shouldSave := false
rpath := r.layerspath() rpath := r.layerspath()
data, err := ioutil.ReadFile(rpath) data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
return err return err
} }
layers := []Layer{} layers := []*Layer{}
idlist := []string{} idlist := []string{}
ids := make(map[string]*Layer) ids := make(map[string]*Layer)
names := make(map[string]*Layer) names := make(map[string]*Layer)
mounts := make(map[string]*Layer) mounts := make(map[string]*Layer)
parents := make(map[string][]*Layer) compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
for n, layer := range layers { for n, layer := range layers {
ids[layer.ID] = &layers[n] ids[layer.ID] = layers[n]
idlist = append(idlist, layer.ID) idlist = append(idlist, layer.ID)
for _, name := range layer.Names { for _, name := range layer.Names {
if conflict, ok := names[name]; ok { if conflict, ok := names[name]; ok {
r.removeName(conflict, name) r.removeName(conflict, name)
needSave = true shouldSave = true
} }
names[name] = &layers[n] names[name] = layers[n]
} }
if pslice, ok := parents[layer.Parent]; ok { if layer.CompressedDigest != "" {
parents[layer.Parent] = append(pslice, &layers[n]) compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
} else { }
parents[layer.Parent] = []*Layer{&layers[n]} if layer.UncompressedDigest != "" {
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
} }
} }
} }
if shouldSave && !r.IsReadWrite() {
return errors.New("layer store assigns the same name to multiple layers")
}
mpath := r.mountspath() mpath := r.mountspath()
data, err = ioutil.ReadFile(mpath) data, err = ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
@ -231,10 +304,13 @@ func (r *layerStore) Load() error {
r.byid = ids r.byid = ids
r.byname = names r.byname = names
r.bymount = mounts r.bymount = mounts
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
err = nil err = nil
// Last step: try to remove anything that a previous user of this // Last step: if we're writable, try to remove anything that a previous
// storage area marked for deletion but didn't manage to actually // user of this storage area marked for deletion but didn't manage to
// delete. // actually delete.
if r.IsReadWrite() {
for _, layer := range r.layers { for _, layer := range r.layers {
if cleanup, ok := layer.Flags[incompleteFlag]; ok { if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b { if b, ok := cleanup.(bool); ok && b {
@ -242,18 +318,21 @@ func (r *layerStore) Load() error {
if err != nil { if err != nil {
break break
} }
needSave = true shouldSave = true
} }
} }
} }
if needSave { if shouldSave {
r.Touch()
return r.Save() return r.Save()
} }
}
return err return err
} }
func (r *layerStore) Save() error { func (r *layerStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
rpath := r.layerspath() rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err return err
@ -283,6 +362,7 @@ func (r *layerStore) Save() error {
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err return err
} }
defer r.Touch()
return ioutils.AtomicWriteFile(mpath, jmdata, 0600) return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
} }
@ -314,6 +394,28 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer
return &rlstore, nil return &rlstore, nil
} }
func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
lockfile.Lock()
defer lockfile.Unlock()
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
if err := rlstore.Load(); err != nil {
return nil, err
}
return &rlstore, nil
}
func (r *layerStore) lookup(id string) (*Layer, bool) { func (r *layerStore) lookup(id string) (*Layer, bool) {
if layer, ok := r.byid[id]; ok { if layer, ok := r.byid[id]; ok {
return layer, ok return layer, ok
@ -326,7 +428,24 @@ func (r *layerStore) lookup(id string) (*Layer, bool) {
return nil, false return nil, false
} }
func (r *layerStore) Size(name string) (int64, error) {
layer, ok := r.lookup(name)
if !ok {
return -1, ErrLayerUnknown
}
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
if layer.UncompressedDigest != "" {
return layer.UncompressedSize, nil
}
return -1, nil
}
func (r *layerStore) ClearFlag(id string, flag string) error { func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return ErrLayerUnknown return ErrLayerUnknown
@ -336,6 +455,9 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
} }
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return ErrLayerUnknown return ErrLayerUnknown
@ -349,6 +471,9 @@ func (r *layerStore) Status() ([][2]string, error) {
} }
func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) {
if !r.IsReadWrite() {
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
}
size = -1 size = -1
if err := os.MkdirAll(r.rundir, 0700); err != nil { if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err return nil, -1, err
@ -383,15 +508,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
err = r.driver.Create(id, parent, mountLabel, options) err = r.driver.Create(id, parent, mountLabel, options)
} }
if err == nil { if err == nil {
newLayer := Layer{ layer = &Layer{
ID: id, ID: id,
Parent: parent, Parent: parent,
Names: names, Names: names,
MountLabel: mountLabel, MountLabel: mountLabel,
Created: time.Now().UTC(),
Flags: make(map[string]interface{}), Flags: make(map[string]interface{}),
} }
r.layers = append(r.layers, newLayer) r.layers = append(r.layers, layer)
layer = &r.layers[len(r.layers)-1]
r.idindex.Add(id) r.idindex.Add(id)
r.byid[id] = layer r.byid[id] = layer
for _, name := range names { for _, name := range names {
@ -441,6 +566,9 @@ func (r *layerStore) Create(id, parent string, names []string, mountLabel string
} }
func (r *layerStore) Mount(id, mountLabel string) (string, error) { func (r *layerStore) Mount(id, mountLabel string) (string, error) {
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return "", ErrLayerUnknown return "", ErrLayerUnknown
@ -466,6 +594,9 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
} }
func (r *layerStore) Unmount(id string) error { func (r *layerStore) Unmount(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)] layerByMount, ok := r.bymount[filepath.Clean(id)]
@ -495,6 +626,9 @@ func (r *layerStore) removeName(layer *Layer, name string) {
} }
func (r *layerStore) SetNames(id string, names []string) error { func (r *layerStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
}
if layer, ok := r.lookup(id); ok { if layer, ok := r.lookup(id); ok {
for _, name := range layer.Names { for _, name := range layer.Names {
delete(r.byname, name) delete(r.byname, name)
@ -519,6 +653,9 @@ func (r *layerStore) Metadata(id string) (string, error) {
} }
func (r *layerStore) SetMetadata(id, metadata string) error { func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
}
if layer, ok := r.lookup(id); ok { if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata layer.Metadata = metadata
return r.Save() return r.Save()
@ -531,6 +668,9 @@ func (r *layerStore) tspath(id string) string {
} }
func (r *layerStore) Delete(id string) error { func (r *layerStore) Delete(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
layer, ok := r.lookup(id) layer, ok := r.lookup(id)
if !ok { if !ok {
return ErrLayerUnknown return ErrLayerUnknown
@ -549,13 +689,21 @@ func (r *layerStore) Delete(id string) error {
if layer.MountPoint != "" { if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint) delete(r.bymount, layer.MountPoint)
} }
newLayers := []Layer{} toDeleteIndex := -1
for _, candidate := range r.layers { for i, candidate := range r.layers {
if candidate.ID != id { if candidate.ID == id {
newLayers = append(newLayers, candidate) toDeleteIndex = i
break
}
}
if toDeleteIndex != -1 {
// delete the layer at toDeleteIndex
if toDeleteIndex == len(r.layers)-1 {
r.layers = r.layers[:len(r.layers)-1]
} else {
r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
} }
} }
r.layers = newLayers
if err = r.Save(); err != nil { if err = r.Save(); err != nil {
return err return err
} }
@ -583,6 +731,9 @@ func (r *layerStore) Get(id string) (*Layer, error) {
} }
func (r *layerStore) Wipe() error { func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
ids := []string{} ids := []string{}
for id := range r.byid { for id := range r.byid {
ids = append(ids, id) ids = append(ids, id)
@ -657,20 +808,20 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
}, nil }, nil
} }
func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) { func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
var metadata storage.Unpacker var metadata storage.Unpacker
from, to, toLayer, err := r.findParentAndLayer(from, to) from, to, toLayer, err := r.findParentAndLayer(from, to)
if err != nil { if err != nil {
return nil, ErrLayerUnknown return nil, ErrLayerUnknown
} }
compression := archive.Uncompressed // Default to applying the type of encryption that we noted was used
if cflag, ok := toLayer.Flags[compressionFlag]; ok { // for the layerdiff when it was applied.
if ctype, ok := cflag.(float64); ok { compression := toLayer.CompressionType
compression = archive.Compression(ctype) // If a particular compression type (or no compression) was selected,
} else if ctype, ok := cflag.(archive.Compression); ok { // use that instead.
compression = archive.Compression(ctype) if options != nil && options.Compression != nil {
} compression = *options.Compression
} }
if from != toLayer.Parent { if from != toLayer.Parent {
diff, err := r.driver.Diff(to, from) diff, err := r.driver.Diff(to, from)
@ -758,6 +909,10 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
} }
func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) {
if !r.IsReadWrite() {
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
}
layer, ok := r.lookup(to) layer, ok := r.lookup(to)
if !ok { if !ok {
return -1, ErrLayerUnknown return -1, ErrLayerUnknown
@ -770,7 +925,9 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
} }
compression := archive.DetectCompression(header[:n]) compression := archive.DetectCompression(header[:n])
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) compressedDigest := digest.Canonical.Digester()
compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
tsdata := bytes.Buffer{} tsdata := bytes.Buffer{}
compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
@ -778,15 +935,20 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
compressor = gzip.NewWriter(&tsdata) compressor = gzip.NewWriter(&tsdata)
} }
metadata := storage.NewJSONPacker(compressor) metadata := storage.NewJSONPacker(compressor)
decompressed, err := archive.DecompressStream(defragmented) uncompressed, err := archive.DecompressStream(defragmented)
if err != nil { if err != nil {
return -1, err return -1, err
} }
payload, err := asm.NewInputTarStream(decompressed, metadata, storage.NewDiscardFilePutter()) uncompressedDigest := digest.Canonical.Digester()
uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter())
if err != nil { if err != nil {
return -1, err return -1, err
} }
size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload) size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
if err != nil {
return -1, err
}
compressor.Close() compressor.Close()
if err == nil { if err == nil {
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
@@ -797,15 +959,57 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
 		}
 	}
-	if compression != archive.Uncompressed {
-		layer.Flags[compressionFlag] = compression
-	} else {
-		delete(layer.Flags, compressionFlag)
-	}
+	updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
+		var newList []string
+		if oldvalue != "" {
+			for _, value := range (*m)[oldvalue] {
+				if value != id {
+					newList = append(newList, value)
+				}
+			}
+			if len(newList) > 0 {
+				(*m)[oldvalue] = newList
+			} else {
+				delete(*m, oldvalue)
+			}
+		}
+		if newvalue != "" {
+			(*m)[newvalue] = append((*m)[newvalue], id)
+		}
+	}
+	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
+	layer.CompressedDigest = compressedDigest.Digest()
+	layer.CompressedSize = compressedCounter.Count
+	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
+	layer.UncompressedDigest = uncompressedDigest.Digest()
+	layer.UncompressedSize = uncompressedCounter.Count
+	layer.CompressionType = compression
+	err = r.Save()
 	return size, err
 }
+
+func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
+	var layers []Layer
+	for _, layerID := range m[d] {
+		layer, ok := r.lookup(layerID)
+		if !ok {
+			return nil, ErrLayerUnknown
+		}
+		layers = append(layers, *layer)
+	}
+	return layers, nil
+}
+
+func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.bycompressedsum, d)
+}
+
+func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.byuncompressedsum, d)
+}
 
 func (r *layerStore) Lock() {
 	r.lockfile.Lock()
 }
@@ -822,6 +1026,10 @@ func (r *layerStore) Modified() (bool, error) {
 	return r.lockfile.Modified()
 }
 
+func (r *layerStore) IsReadWrite() bool {
+	return r.lockfile.IsReadWrite()
+}
+
 func (r *layerStore) TouchedSince(when time.Time) bool {
 	return r.lockfile.TouchedSince(when)
 }
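Note on the ApplyDiff changes above: the reworked code computes both digests and both byte counts in a single pass by teeing the stream once before decompression and once after it. The following standalone sketch mirrors that pattern under stated assumptions: the file name "layer.tar.gz", the panics, and the throwaway io.Copy sink (standing in for the tar-split packer and driver.ApplyDiff) are illustration only, not library code.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/ioutils"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical compressed layer diff on disk.
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Tee the raw stream: every byte consumed downstream is also fed to
	// the compressed digester and byte counter.
	compressedDigest := digest.Canonical.Digester()
	compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
	raw := io.TeeReader(f, compressedCounter)

	// Decompress, then tee again to account for the uncompressed bytes.
	uncompressed, err := archive.DecompressStream(raw)
	if err != nil {
		panic(err)
	}
	defer uncompressed.Close()
	uncompressedDigest := digest.Canonical.Digester()
	uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())

	// Stand-in for the real consumer of the uncompressed tar stream.
	if _, err := io.Copy(ioutil.Discard, io.TeeReader(uncompressed, uncompressedCounter)); err != nil {
		panic(err)
	}

	fmt.Printf("compressed:   %s (%d bytes)\n", compressedDigest.Digest(), compressedCounter.Count)
	fmt.Printf("uncompressed: %s (%d bytes)\n", uncompressedDigest.Digest(), uncompressedCounter.Count)
}

Once the digests are recorded on the layer, the new LayersByCompressedDigest and LayersByUncompressedDigest methods let a caller ask which stored layers already hold a given blob, which is what makes reusing layers across image pulls possible.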
@@ -1,14 +1,15 @@
 package storage
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
 	"sync"
 	"time"
 
-	"golang.org/x/sys/unix"
-
 	"github.com/containers/storage/pkg/stringid"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
 )
 
 // A Locker represents a file lock where the file is used to cache an
@@ -27,6 +28,9 @@ type Locker interface {
 	// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
 	TouchedSince(when time.Time) bool
+
+	// IsReadWrite() checks if the lock file is read-write
+	IsReadWrite() bool
 }
 
 type lockfile struct {
@@ -34,36 +38,70 @@ type lockfile struct {
 	file string
 	fd   uintptr
 	lw   string
+	locktype int16
 }
 
 var (
 	lockfiles     map[string]*lockfile
 	lockfilesLock sync.Mutex
+	// ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write
+	ErrLockReadOnly = errors.New("lock is not a read-write lock")
 )
 
-// GetLockfile opens a lock file, creating it if necessary. The Locker object
-// return will be returned unlocked.
+// GetLockfile opens a read-write lock file, creating it if necessary. The
+// Locker object it returns will be returned unlocked.
 func GetLockfile(path string) (Locker, error) {
 	lockfilesLock.Lock()
 	defer lockfilesLock.Unlock()
 	if lockfiles == nil {
 		lockfiles = make(map[string]*lockfile)
 	}
-	if locker, ok := lockfiles[filepath.Clean(path)]; ok {
+	cleanPath := filepath.Clean(path)
+	if locker, ok := lockfiles[cleanPath]; ok {
+		if !locker.IsReadWrite() {
+			return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath)
+		}
 		return locker, nil
 	}
-	fd, err := unix.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+	fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "error opening %q", cleanPath)
 	}
-	locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID()}
+	unix.CloseOnExec(fd)
+	locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}
 	lockfiles[filepath.Clean(path)] = locker
 	return locker, nil
 }
 
+// GetROLockfile opens a read-only lock file. The Locker object it returns
+// will be returned unlocked.
+func GetROLockfile(path string) (Locker, error) {
+	lockfilesLock.Lock()
+	defer lockfilesLock.Unlock()
+	if lockfiles == nil {
+		lockfiles = make(map[string]*lockfile)
+	}
+	cleanPath := filepath.Clean(path)
+	if locker, ok := lockfiles[cleanPath]; ok {
+		if locker.IsReadWrite() {
+			return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath)
+		}
+		return locker, nil
+	}
+	fd, err := unix.Open(cleanPath, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error opening %q", cleanPath)
+	}
+	unix.CloseOnExec(fd)
+	locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}
+	lockfiles[filepath.Clean(path)] = locker
+	return locker, nil
+}
+
+// Lock locks the lock file
 func (l *lockfile) Lock() {
 	lk := unix.Flock_t{
-		Type:   unix.F_WRLCK,
+		Type:   l.locktype,
 		Whence: int16(os.SEEK_SET),
 		Start:  0,
 		Len:    0,
@@ -75,6 +113,7 @@ func (l *lockfile) Lock() {
 	}
 }
 
+// Unlock unlocks the lock file
 func (l *lockfile) Unlock() {
 	lk := unix.Flock_t{
 		Type:   unix.F_UNLCK,
@@ -89,6 +128,7 @@ func (l *lockfile) Unlock() {
 	l.mu.Unlock()
 }
 
+// Touch updates the lock file with the UID of the user
 func (l *lockfile) Touch() error {
 	l.lw = stringid.GenerateRandomID()
 	id := []byte(l.lw)
@@ -110,6 +150,7 @@ func (l *lockfile) Touch() error {
 	return nil
 }
 
+// Modified indicates if the lock file has been updated since the last time it was loaded
 func (l *lockfile) Modified() (bool, error) {
 	id := []byte(l.lw)
 	_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -128,6 +169,7 @@ func (l *lockfile) Modified() (bool, error) {
 	return l.lw != lw, nil
 }
 
+// TouchedSince indicates if the lock file has been touched since the specified time
 func (l *lockfile) TouchedSince(when time.Time) bool {
 	st := unix.Stat_t{}
 	err := unix.Fstat(int(l.fd), &st)
@@ -137,3 +179,8 @@ func (l *lockfile) TouchedSince(when time.Time) bool {
 	touched := time.Unix(statTMtimeUnix(st))
 	return when.Before(touched)
 }
+
+// IsReadWrite indicates if the lock file is a read-write lock
+func (l *lockfile) IsReadWrite() bool {
+	return (l.locktype == unix.F_WRLCK)
+}
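The locktype plumbing above is what lets read-only stores take shared locks while read-write stores take exclusive ones: F_RDLCK can be held by many readers at once, while F_WRLCK admits a single writer. A minimal sketch of the underlying fcntl call, assuming a throwaway path "/tmp/example.lock" and leaving out the reference-counted lockfiles map the real code maintains:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// lockWholeFile takes a POSIX record lock over the entire file.
// Pass unix.F_RDLCK for a shared (reader) lock or unix.F_WRLCK for an
// exclusive (writer) lock; F_SETLKW blocks until the lock is granted.
func lockWholeFile(fd int, locktype int16) error {
	lk := unix.Flock_t{
		Type:   locktype,
		Whence: int16(os.SEEK_SET),
		Start:  0,
		Len:    0, // a length of 0 means "through end of file"
	}
	return unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk)
}

func main() {
	// Throwaway lock file; a read-only caller would open with O_RDONLY
	// and request F_RDLCK instead.
	fd, err := unix.Open("/tmp/example.lock", unix.O_RDWR|unix.O_CREAT, 0600)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	if err := lockWholeFile(fd, unix.F_WRLCK); err != nil {
		panic(err)
	}
	fmt.Println("holding exclusive lock")

	// Release: the same call with F_UNLCK.
	lk := unix.Flock_t{Type: unix.F_UNLCK, Whence: int16(os.SEEK_SET)}
	if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLK, &lk); err != nil {
		panic(err)
	}
}

Worth noting: POSIX record locks are per-process, not per-goroutine, which is why the lockfile struct also guards itself with an in-process sync.Mutex (the l.mu seen in Unlock above).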
@@ -1,13 +0,0 @@
-// +build !containersstorageautogen
-
-// Package storageversion is auto-generated at build-time
-package storageversion
-
-// Default build-time variable for library-import.
-// This file is overridden on build with build-time informations.
-const (
-	GitCommit string = "library-import"
-	Version   string = "library-import"
-	BuildTime string = "library-import"
-	IAmStatic string = "library-import"
-)
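The deleted storageversion package stamped version details by generating a replacement source file at build time, with the !containersstorageautogen build tag selecting the placeholder copy shown above for library imports. Where a build stamp is still wanted, the usual lighter-weight substitute is go's -ldflags "-X ..." mechanism; the sketch below uses a hypothetical variable name and is not part of this repository.

package main

import "fmt"

// Overridden at build time, e.g.:
//   go build -ldflags "-X main.gitCommit=$(git rev-parse --short HEAD)"
var gitCommit = "library-import"

func main() {
	fmt.Println("git commit:", gitCommit)
}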

File diff suppressed because it is too large

@@ -1,3 +1,4 @@
+github.com/BurntSushi/toml master
 github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
 github.com/Microsoft/hcsshim 0f615c198a84e0344b4ed49c464d8833d4648dfc
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
@@ -7,9 +8,11 @@ github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 github.com/go-check/check 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
+github.com/opencontainers/go-digest master
 github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
+github.com/pkg/errors master
 github.com/tchap/go-patricia v2.2.6
 github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
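Note: the three dependencies added here (github.com/BurntSushi/toml, github.com/opencontainers/go-digest, github.com/pkg/errors) are pinned to the master branch rather than to a fixed commit, so re-vendoring at a later date may pull in different code. The same one-line format accepts an explicit commit hash, as the surrounding entries do; the value below is a placeholder, not a real pin:

github.com/pkg/errors <commit-sha-to-pin>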