Merge pull request #948 from rhatdan/quota

Add Overlay Quota support
Daniel J Walsh 2017-10-02 08:48:06 -04:00 committed by GitHub
commit 88fc20be1e
59 changed files with 2797 additions and 1896 deletions


@@ -98,7 +98,7 @@ func main() {
 		},
 		cli.StringFlag{
 			Name:  "storage-driver, s",
-			Usage: "select which storage driver is used to manage storage of images and containers (default is overlay2)",
+			Usage: "select which storage driver is used to manage storage of images and containers (default is overlay)",
 		},
 		cli.StringSliceFlag{
 			Name: "storage-opt",


@@ -47,10 +47,10 @@
     - src: test/redhat_sigstore.yaml
       dest: /etc/containers/registries.d/registry.access.redhat.com.yaml

-- name: run with overlay2
+- name: run with overlay
   replace:
     regexp: 'storage_driver = ""'
-    replace: 'storage_driver = "overlay2"'
+    replace: 'storage_driver = "overlay"'
     name: /etc/crio/crio.conf
     backup: yes
@@ -69,11 +69,11 @@
     regexp: 'docker\.io'
     state: present

-- name: add overlay2 storage opts on RHEL/CentOS
+- name: add overlay storage opts on RHEL/CentOS
   lineinfile:
     dest: /etc/crio/crio.conf
-    line: '"overlay2.override_kernel_check=1"'
+    line: '"overlay.override_kernel_check=1"'
     insertafter: 'storage_option = \['
-    regexp: 'overlay2\.override_kernel_check=1'
+    regexp: 'overlay\.override_kernel_check=1'
     state: present
   when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'


@@ -19,6 +19,14 @@
     async: 600
     poll: 10

+- name: Add Btrfs for Fedora
+  package:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - btrfs-progs-devel
+  when: ansible_distribution in ['Fedora']
+
 - name: Update all packages
   package:
     name: '*'


@@ -17,7 +17,7 @@
 - name: set extra storage options
   set_fact:
-    extra_storage_opts: " --storage-opt overlay2.override_kernel_check=1"
+    extra_storage_opts: " --storage-opt overlay.override_kernel_check=1"
   when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'

 - name: set extra shell for non-xunit tests
@@ -26,7 +26,7 @@
   when: not xunit

 - name: run integration tests
-  shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTS='--storage-driver=overlay2{{ extra_storage_opts | default('') }}' make localintegration{{ extra_shell_suffix }}"
+  shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration{{ extra_shell_suffix }}"
   args:
     chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
   ignore_errors: yes


@@ -4,7 +4,6 @@
 xunit: false
 rpm_pkgs:
-  - btrfs-progs-devel
   - container-selinux
   - curl
   - device-mapper-devel


@@ -36,11 +36,35 @@ The `crio` table supports the following options:
 	CRIO state dir (default: "/var/run/containers/storage")

 **storage_driver**=""
-	CRIO storage driver (default is "devicemapper")
+	CRIO storage driver (default is "overlay")
+
+	Note:
+		**overlay** and **overlay2** are the same driver.

 **storage_option**=[]
 	CRIO storage driver option list (no default)
+
+	Values:
+		"STORAGE_DRIVER.imagestore=/PATH",
+			Paths to additional container image stores. These are read-only and are usually kept on remote network shares, using the overlay storage format.
+			storage_option=[ "overlay.imagestore=/mnt/overlay", ]
+
+		"STORAGE_DRIVER.size=SIZE"
+			Maximum size of a container image (default: 10GB). The size option sets a quota on the size of container images.
+			storage_option=[ "overlay.size=1G", ]
+
+	Note: Not all drivers support all options.
+
+	Note: In order to use the **size** option for quota on *overlay* storage you must use the *xfs* file system. The mount point that the *overlay* file system is mounted on must be set up with the *pquota* flag at mount time. If you are setting up / to be used with quota, you have to modify the Linux boot line in /etc/grub2.conf and add the rootflags=pquota flag.
+
+	Example:
+		linux16 /vmlinuz-4.12.13-300.fc26.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8 rootflags=pquota

 ## CRIO.API TABLE

 **listen**=""

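The **size** option above takes human-readable values such as "1G". The following is an illustrative Go sketch only (the sample option string and the small main() harness are invented here, but both helper packages are vendored elsewhere in this PR) showing how such a value can be split and converted to bytes:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
	units "github.com/docker/go-units"
)

func main() {
	// Split "overlay.size=1G" into its key and value.
	key, val, err := parsers.ParseKeyValueOpt("overlay.size=1G")
	if err != nil {
		panic(err)
	}
	// Convert the human-readable size into bytes ("1G" -> 1073741824).
	size, err := units.RAMInBytes(val)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s limits image size to %d bytes\n", key, size)
}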

@@ -146,11 +146,11 @@ kpod inspect redis:alpine
         "Size": 3965955,
         "VirtualSize": 19808086,
         "GraphDriver": {
-            "Name": "overlay2",
+            "Name": "overlay",
             "Data": {
-                "MergedDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/merged",
-                "UpperDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/diff",
-                "WorkDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/work"
+                "MergedDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/merged",
+                "UpperDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/diff",
+                "WorkDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/work"
             }
         },
         "RootFS": {


@@ -38,13 +38,13 @@ SELinux label for the mount point

 kpod mount c831414b10a3

-/var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
+/var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged

 kpod mount

-c831414b10a3 /var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
-a7060253093b /var/lib/containers/storage/overlay2/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged
+c831414b10a3 /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
+a7060253093b /var/lib/containers/storage/overlay/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged

 ## SEE ALSO
 kpod(1), kpod-umount(1), mount(8)


@@ -7,7 +7,7 @@ github.com/sirupsen/logrus v1.0.0
 github.com/containers/image d17474f39dae1da15ab9ae033d57ebefcf62f77a
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/ostreedev/ostree-go master
-github.com/containers/storage 9c85fa701316a49afdf85d55a0d7cb582ed03625
+github.com/containers/storage 64bf27465d0d1edd89e7a4ce49866fea01145782
 github.com/containernetworking/cni v0.4.0
 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
 github.com/opencontainers/selinux v1.0.0-rc1


@@ -239,6 +239,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 	if _, idInUse := r.byid[id]; idInUse {
 		return nil, ErrDuplicateID
 	}
+	names = dedupeNames(names)
 	for _, name := range names {
 		if _, nameInUse := r.byname[name]; nameInUse {
 			return nil, ErrDuplicateName
@@ -288,6 +289,7 @@ func (r *containerStore) removeName(container *Container, name string) {
 }

 func (r *containerStore) SetNames(id string, names []string) error {
+	names = dedupeNames(names)
 	if container, ok := r.lookup(id); ok {
 		for _, name := range container.Names {
 			delete(r.byname, name)

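Both hunks above call dedupeNames, which is defined elsewhere in containers/storage and is not shown in this diff. A rough, hypothetical sketch of what an order-preserving de-duplication helper could look like (the real implementation may differ):

func dedupeNames(names []string) []string {
	seen := make(map[string]struct{}, len(names))
	deduped := make([]string, 0, len(names))
	for _, name := range names {
		if _, ok := seen[name]; ok {
			continue // drop repeated names, keep the first occurrence
		}
		seen[name] = struct{}{}
		deduped = append(deduped, name)
	}
	return deduped
}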

@@ -33,22 +33,22 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
-	"syscall"
+	"time"

-	"github.com/sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
-
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/locker"
 	mountpk "github.com/containers/storage/pkg/mount"
-	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/system"
 	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+	"golang.org/x/sys/unix"
 )

 var (
@ -75,6 +75,8 @@ type Driver struct {
ctr *graphdriver.RefCounter ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex pathCacheLock sync.Mutex
pathCache map[string]string pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
} }
// Init returns a new AUFS driver. // Init returns a new AUFS driver.
@ -84,6 +86,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// Try to load the aufs kernel module // Try to load the aufs kernel module
if err := supportsAufs(); err != nil { if err := supportsAufs(); err != nil {
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
} }
fsMagic, err := graphdriver.GetFSMagic(root) fsMagic, err := graphdriver.GetFSMagic(root)
@ -112,6 +115,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
gidMaps: gidMaps, gidMaps: gidMaps,
pathCache: make(map[string]string), pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -138,6 +142,32 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err return nil, err
} }
} }
logger := logrus.WithFields(logrus.Fields{
"module": "graphdriver",
"driver": "aufs",
})
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(root, path)
entries, err := ioutil.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
}
for _, entry := range entries {
if !entry.IsDir() {
continue
}
if strings.HasSuffix(entry.Name(), "-removing") {
logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir")
if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil {
logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir")
}
}
}
}
a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
return a, nil return a, nil
} }
@ -201,17 +231,22 @@ func (a *Driver) Exists(id string) bool {
return true return true
} }
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
return nil
}
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return a.Create(id, parent, mountLabel, storageOpt) return a.Create(id, parent, opts)
} }
// Create three folders for each id // Create three folders for each id
// mnt, layers, and diff // mnt, layers, and diff
func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if len(storageOpt) != 0 { if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs") return fmt.Errorf("--storage-opt is not supported for aufs")
} }
@ -226,7 +261,7 @@ func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
defer f.Close() defer f.Close()
if parent != "" { if parent != "" {
ids, err := getParentIds(a.rootPath(), parent) ids, err := getParentIDs(a.rootPath(), parent)
if err != nil { if err != nil {
return err return err
} }
@@ -269,35 +304,68 @@ func (a *Driver) createDirsFor(id string) error {
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
 	a.pathCacheLock.Lock()
 	mountpoint, exists := a.pathCache[id]
 	a.pathCacheLock.Unlock()
 	if !exists {
 		mountpoint = a.getMountpoint(id)
 	}
-	if err := a.unmount(mountpoint); err != nil {
-		// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
-		logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
-	}

-	// Atomically remove each directory in turn by first moving it out of the
-	// way (so that container runtimes don't find it anymore) before doing removal of
-	// the whole tree.
-	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	defer os.RemoveAll(tmpMntPath)
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "graphdriver",
+		"driver": "aufs",
+		"layer":  id,
+	})

-	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	defer os.RemoveAll(tmpDiffpath)
+	var retries int
+	for {
+		mounted, err := a.mounted(mountpoint)
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return err
+		}
+		if !mounted {
+			break
+		}
+
+		err = a.unmount(mountpoint)
+		if err == nil {
+			break
+		}
+
+		if err != unix.EBUSY {
+			return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+		}
+		if retries >= 5 {
+			return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+		}
+		// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+		retries++
+		logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+		time.Sleep(100 * time.Millisecond)
+	}

 	// Remove the layers file for the id
 	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
-		return err
+		return errors.Wrapf(err, "error removing layers dir for %s", id)
+	}
+
+	if err := atomicRemove(a.getDiffPath(id)); err != nil {
+		return errors.Wrapf(err, "could not remove diff path for id %s", id)
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that container runtime doesn't find it anymore) before doing removal of
+	// the whole tree.
+	if err := atomicRemove(mountpoint); err != nil {
+		if errors.Cause(err) == unix.EBUSY {
+			logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
+		}
+		return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
 	}

 	a.pathCacheLock.Lock()
@ -306,9 +374,29 @@ func (a *Driver) Remove(id string) error {
return nil return nil
} }
func atomicRemove(source string) error {
target := source + "-removing"
err := os.Rename(source, target)
switch {
case err == nil, os.IsNotExist(err):
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
}
default:
return errors.Wrapf(err, "error preparing atomic delete")
}
return system.EnsureRemoveAll(target)
}
// Get returns the rootfs path for the id. // Get returns the rootfs path for the id.
// This will mount the dir at it's given path // This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) { func (a *Driver) Get(id, mountLabel string) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
parents, err := a.getParentLayerPaths(id) parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
return "", err return "", err
@ -344,6 +432,8 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
// Put unmounts and updates list of active mounts. // Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error { func (a *Driver) Put(id string) error {
a.locker.Lock(id)
defer a.locker.Unlock(id)
a.pathCacheLock.Lock() a.pathCacheLock.Lock()
m, exists := a.pathCache[id] m, exists := a.pathCache[id]
if !exists { if !exists {
@ -362,9 +452,22 @@ func (a *Driver) Put(id string) error {
return err return err
} }
// isParent returns if the passed in parent is the direct parent of the passed in layer
func (a *Driver) isParent(id, parent string) bool {
parents, _ := getParentIDs(a.rootPath(), id)
if parent == "" && len(parents) > 0 {
return false
}
return !(len(parents) > 0 && parent != parents[0])
}
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Diff(id, parent)
}
// AUFS doesn't need the parent layer to produce a diff. // AUFS doesn't need the parent layer to produce a diff.
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
Compression: archive.Uncompressed, Compression: archive.Uncompressed,
@ -374,12 +477,6 @@ func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
}) })
} }
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}
type fileGetNilCloser struct { type fileGetNilCloser struct {
storage.FileGetter storage.FileGetter
} }
@ -406,6 +503,9 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error {
// and its parent and returns the size in bytes of the changes // and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory. // relative to its base filesystem directory.
func (a *Driver) DiffSize(id, parent string) (size int64, err error) { func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
if !a.isParent(id, parent) {
return a.naiveDiff.DiffSize(id, parent)
}
// AUFS doesn't need the parent layer to calculate the diff size. // AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id)) return directory.Size(path.Join(a.rootPath(), "diff", id))
} }
@ -414,7 +514,11 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
// layer with the specified id and parent, returning the size of the // layer with the specified id and parent, returning the size of the
// new layer in bytes. // new layer in bytes.
func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
// AUFS doesn't need the parent id to apply the diff. if !a.isParent(id, parent) {
return a.naiveDiff.ApplyDiff(id, parent, diff)
}
// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
if err = a.applyDiff(id, diff); err != nil { if err = a.applyDiff(id, diff); err != nil {
return return
} }
@ -425,6 +529,10 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Changes(id, parent)
}
// AUFS doesn't have snapshots, so we need to get changes from all parent // AUFS doesn't have snapshots, so we need to get changes from all parent
// layers. // layers.
layers, err := a.getParentLayerPaths(id) layers, err := a.getParentLayerPaths(id)
@ -435,7 +543,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
} }
func (a *Driver) getParentLayerPaths(id string) ([]string, error) { func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
parentIds, err := getParentIds(a.rootPath(), id) parentIds, err := getParentIDs(a.rootPath(), id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -500,7 +608,7 @@ func (a *Driver) Cleanup() error {
for _, m := range dirs { for _, m := range dirs {
if err := a.unmount(m); err != nil { if err := a.unmount(m); err != nil {
logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) logrus.Debugf("aufs error unmounting %s: %s", m, err)
} }
} }
return mountpk.Unmount(a.root) return mountpk.Unmount(a.root)
@@ -518,32 +626,20 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	offset := 54
 	if useDirperm() {
-		offset += len("dirperm1")
+		offset += len(",dirperm1")
 	}
-	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+	b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
 	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))

-	firstMount := true
-	i := 0
-
-	for {
-		for ; i < len(ro); i++ {
-			layer := fmt.Sprintf(":%s=ro+wh", ro[i])
-
-			if firstMount {
-				if bp+len(layer) > len(b) {
-					break
-				}
-				bp += copy(b[bp:], layer)
-			} else {
-				data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
-				if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
-					return
-				}
-			}
-		}
-
-		if firstMount {
-			opts := "dio,xino=/dev/shm/aufs.xino"
-			if useDirperm() {
-				opts += ",dirperm1"
-			}
+	index := 0
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		if bp+len(layer) > len(b) {
+			break
+		}
+		bp += copy(b[bp:], layer)
+	}
+
+	opts := "dio,xino=/dev/shm/aufs.xino"
+	if useDirperm() {
+		opts += ",dirperm1"
+	}
@@ -552,11 +648,12 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	if err = mount("none", target, "aufs", 0, data); err != nil {
 		return
 	}
-		firstMount = false
-	}
-
-		if i == len(ro) {
-			break
-		}
-	}
+
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+		if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+			return
+		}
+	}


@@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) {
 //
 // If there are no lines in the file then the id has no parent
 // and an empty slice is returned.
-func getParentIds(root, id string) ([]string, error) {
+func getParentIDs(root, id string) ([]string, error) {
 	f, err := os.Open(path.Join(root, "layers", id))
 	if err != nil {
 		return nil, err


@@ -4,9 +4,9 @@ package aufs

 import (
 	"os/exec"
-	"syscall"

 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )

 // Unmount the target specified.
@@ -14,7 +14,7 @@ func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
 		logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
 	}
-	if err := syscall.Unmount(target, 0); err != nil {
+	if err := unix.Unmount(target, 0); err != nil {
 		return err
 	}
 	return nil


@@ -1,7 +1,7 @@
 package aufs

-import "syscall"
+import "golang.org/x/sys/unix"

 func mount(source string, target string, fstype string, flags uintptr, data string) error {
-	return syscall.Mount(source, target, fstype, flags, data)
+	return unix.Mount(source, target, fstype, flags, data)
 }


@@ -2,7 +2,7 @@
 package aufs

-import "github.com/pkg/errors"
+import "errors"

 // MsRemount declared to specify a non-linux system mount.
 const MsRemount = 0


@ -16,31 +16,32 @@ import "C"
import ( import (
"fmt" "fmt"
"io/ioutil"
"math"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"syscall" "sync"
"unsafe" "unsafe"
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
func init() { func init() {
graphdriver.Register("btrfs", Init) graphdriver.Register("btrfs", Init)
} }
var (
quotaEnabled = false
userDiskQuota = false
)
type btrfsOptions struct { type btrfsOptions struct {
minSpace uint64 minSpace uint64
size uint64 size uint64
@ -71,18 +72,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err return nil, err
} }
opt, err := parseOptions(options) opt, userDiskQuota, err := parseOptions(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if userDiskQuota {
if err := subvolEnableQuota(home); err != nil {
return nil, err
}
quotaEnabled = true
}
driver := &Driver{ driver := &Driver{
home: home, home: home,
uidMaps: uidMaps, uidMaps: uidMaps,
@ -90,30 +84,37 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
options: opt, options: opt,
} }
if userDiskQuota {
if err := driver.subvolEnableQuota(); err != nil {
return nil, err
}
}
return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
} }
func parseOptions(opt []string) (btrfsOptions, error) { func parseOptions(opt []string) (btrfsOptions, bool, error) {
var options btrfsOptions var options btrfsOptions
userDiskQuota := false
for _, option := range opt { for _, option := range opt {
key, val, err := parsers.ParseKeyValueOpt(option) key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil { if err != nil {
return options, err return options, userDiskQuota, err
} }
key = strings.ToLower(key) key = strings.ToLower(key)
switch key { switch key {
case "btrfs.min_space": case "btrfs.min_space":
minSpace, err := units.RAMInBytes(val) minSpace, err := units.RAMInBytes(val)
if err != nil { if err != nil {
return options, err return options, userDiskQuota, err
} }
userDiskQuota = true userDiskQuota = true
options.minSpace = uint64(minSpace) options.minSpace = uint64(minSpace)
default: default:
return options, fmt.Errorf("Unknown option %s", key) return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
} }
} }
return options, nil return options, userDiskQuota, nil
} }
// Driver contains information about the filesystem mounted. // Driver contains information about the filesystem mounted.
@ -123,6 +124,8 @@ type Driver struct {
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
options btrfsOptions options btrfsOptions
quotaEnabled bool
once sync.Once
} }
// String prints the name of the driver (btrfs). // String prints the name of the driver (btrfs).
@ -151,11 +154,9 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// Cleanup unmounts the home directory. // Cleanup unmounts the home directory.
func (d *Driver) Cleanup() error { func (d *Driver) Cleanup() error {
if quotaEnabled { if err := d.subvolDisableQuota(); err != nil {
if err := subvolDisableQuota(d.home); err != nil {
return err return err
} }
}
return mount.Unmount(d.home) return mount.Unmount(d.home)
} }
@ -197,7 +198,7 @@ func subvolCreate(path, name string) error {
args.name[i] = C.char(c) args.name[i] = C.char(c)
} }
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
@ -225,7 +226,7 @@ func subvolSnapshot(src, dest, name string) error {
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs)) C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
@ -234,8 +235,8 @@ func subvolSnapshot(src, dest, name string) error {
} }
func isSubvolume(p string) (bool, error) { func isSubvolume(p string) (bool, error) {
var bufStat syscall.Stat_t var bufStat unix.Stat_t
if err := syscall.Lstat(p, &bufStat); err != nil { if err := unix.Lstat(p, &bufStat); err != nil {
return false, err return false, err
} }
@ -243,7 +244,7 @@ func isSubvolume(p string) (bool, error) {
return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
} }
func subvolDelete(dirpath, name string) error { func subvolDelete(dirpath, name string, quotaEnabled bool) error {
dir, err := openDir(dirpath) dir, err := openDir(dirpath)
if err != nil { if err != nil {
return err return err
@ -271,7 +272,7 @@ func subvolDelete(dirpath, name string) error {
return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
} }
if sv { if sv {
if err := subvolDelete(path.Dir(p), f.Name()); err != nil { if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil {
return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
} }
} }
@ -282,12 +283,27 @@ func subvolDelete(dirpath, name string) error {
return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
} }
if quotaEnabled {
if qgroupid, err := subvolLookupQgroup(fullPath); err == nil {
var args C.struct_btrfs_ioctl_qgroup_create_args
args.qgroupid = C.__u64(qgroupid)
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
}
} else {
logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
}
}
// all subvolumes have been removed // all subvolumes have been removed
// now remove the one originally passed in // now remove the one originally passed in
for i, c := range []byte(name) { for i, c := range []byte(name) {
args.name[i] = C.char(c) args.name[i] = C.char(c)
} }
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
@ -295,8 +311,27 @@ func subvolDelete(dirpath, name string) error {
return nil return nil
} }
func subvolEnableQuota(path string) error { func (d *Driver) updateQuotaStatus() {
dir, err := openDir(path) d.once.Do(func() {
if !d.quotaEnabled {
// In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
if err := subvolQgroupStatus(d.home); err != nil {
// quota is still not enabled
return
}
d.quotaEnabled = true
}
})
}
func (d *Driver) subvolEnableQuota() error {
d.updateQuotaStatus()
if d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil { if err != nil {
return err return err
} }
@ -304,17 +339,25 @@ func subvolEnableQuota(path string) error {
var args C.struct_btrfs_ioctl_quota_ctl_args var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
} }
d.quotaEnabled = true
return nil return nil
} }
func subvolDisableQuota(path string) error { func (d *Driver) subvolDisableQuota() error {
dir, err := openDir(path) d.updateQuotaStatus()
if !d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil { if err != nil {
return err return err
} }
@ -322,24 +365,32 @@ func subvolDisableQuota(path string) error {
var args C.struct_btrfs_ioctl_quota_ctl_args var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
} }
d.quotaEnabled = false
return nil return nil
} }
func subvolRescanQuota(path string) error { func (d *Driver) subvolRescanQuota() error {
dir, err := openDir(path) d.updateQuotaStatus()
if !d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil { if err != nil {
return err return err
} }
defer closeDir(dir) defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_rescan_args var args C.struct_btrfs_ioctl_quota_rescan_args
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
@ -358,7 +409,7 @@ func subvolLimitQgroup(path string, size uint64) error {
var args C.struct_btrfs_ioctl_qgroup_limit_args var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_referenced = C.__u64(size) args.lim.max_referenced = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
if errno != 0 { if errno != 0 {
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
@ -367,6 +418,60 @@ func subvolLimitQgroup(path string, size uint64) error {
return nil return nil
} }
// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
// with search key of BTRFS_QGROUP_STATUS_KEY.
// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
func subvolQgroupStatus(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_search_args
args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID
args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY
args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY
args.key.max_objectid = C.__u64(math.MaxUint64)
args.key.max_offset = C.__u64(math.MaxUint64)
args.key.max_transid = C.__u64(math.MaxUint64)
args.key.nr_items = 4096
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
}
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
}
return nil
}
func subvolLookupQgroup(path string) (uint64, error) {
dir, err := openDir(path)
if err != nil {
return 0, err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_ino_lookup_args
args.objectid = C.BTRFS_FIRST_FREE_OBJECTID
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
}
if args.treeid == 0 {
return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
}
return uint64(args.treeid), nil
}
func (d *Driver) subvolumesDir() string { func (d *Driver) subvolumesDir() string {
return path.Join(d.home, "subvolumes") return path.Join(d.home, "subvolumes")
} }
@ -375,14 +480,23 @@ func (d *Driver) subvolumesDirID(id string) string {
return path.Join(d.subvolumesDir(), id) return path.Join(d.subvolumesDir(), id)
} }
func (d *Driver) quotasDir() string {
return path.Join(d.home, "quotas")
}
func (d *Driver) quotasDirID(id string) string {
return path.Join(d.quotasDir(), id)
}
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, mountLabel, storageOpt) return d.Create(id, parent, opts)
} }
// Create the filesystem with given id. // Create the filesystem with given id.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
quotas := path.Join(d.home, "quotas")
subvolumes := path.Join(d.home, "subvolumes") subvolumes := path.Join(d.home, "subvolumes")
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil { if err != nil {
@ -409,14 +523,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
} }
} }
var storageOpt map[string]string
if opts != nil {
storageOpt = opts.StorageOpt
}
if _, ok := storageOpt["size"]; ok { if _, ok := storageOpt["size"]; ok {
driver := &Driver{} driver := &Driver{}
if err := d.parseStorageOpt(storageOpt, driver); err != nil { if err := d.parseStorageOpt(storageOpt, driver); err != nil {
return err return err
} }
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err return err
} }
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
return err
}
if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
return err
}
} }
// if we have a remapped root (user namespaces enabled), change the created snapshot // if we have a remapped root (user namespaces enabled), change the created snapshot
@ -427,6 +553,11 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
} }
} }
mountLabel := ""
if opts != nil {
mountLabel = opts.MountLabel
}
return label.Relabel(path.Join(subvolumes, id), mountLabel, false) return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
} }
@ -459,12 +590,9 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error {
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
} }
if !quotaEnabled { if err := d.subvolEnableQuota(); err != nil {
if err := subvolEnableQuota(d.home); err != nil {
return err return err
} }
quotaEnabled = true
}
if err := subvolLimitQgroup(dir, driver.options.size); err != nil { if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
return err return err
@ -479,13 +607,25 @@ func (d *Driver) Remove(id string) error {
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return err return err
} }
if err := subvolDelete(d.subvolumesDir(), id); err != nil { quotasDir := d.quotasDirID(id)
if _, err := os.Stat(quotasDir); err == nil {
if err := os.Remove(quotasDir); err != nil {
return err return err
} }
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { } else if !os.IsNotExist(err) {
return err return err
} }
if err := subvolRescanQuota(d.home); err != nil {
// Call updateQuotaStatus() to invoke status update
d.updateQuotaStatus()
if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
return err
}
if err := system.EnsureRemoveAll(dir); err != nil {
return err
}
if err := d.subvolRescanQuota(); err != nil {
return err return err
} }
return nil return nil
@ -503,6 +643,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", fmt.Errorf("%s: not a directory", dir) return "", fmt.Errorf("%s: not a directory", dir)
} }
if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.subvolEnableQuota(); err != nil {
return "", err
}
if err := subvolLimitQgroup(dir, size); err != nil {
return "", err
}
}
}
return dir, nil return dir, nil
} }
@ -522,6 +673,5 @@ func (d *Driver) Exists(id string) bool {
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
var imageStores []string return nil
return imageStores
} }

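The btrfs and aufs changes in this PR move Create and CreateReadWrite to the new graphdriver.CreateOpts signature, and the btrfs driver now turns a "size" storage option into a qgroup limit. The sketch below is a hedged usage example only; it assumes the caller already holds a graphdriver.Driver value, and the helper name and layer IDs are invented:

package example

import (
	graphdriver "github.com/containers/storage/drivers"
)

// createQuotaLimitedLayer sketches the new Create/CreateReadWrite signature;
// drv and the layer IDs are placeholders supplied by the caller.
func createQuotaLimitedLayer(drv graphdriver.Driver) error {
	opts := &graphdriver.CreateOpts{
		MountLabel: "",
		StorageOpt: map[string]string{
			"size": "1G", // on btrfs this becomes a qgroup limit on the subvolume
		},
	}
	return drv.CreateReadWrite("example-layer-id", "example-parent-id", opts)
}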

@@ -22,30 +22,21 @@ func NewRefCounter(c Checker) *RefCounter {
 	}
 }

-// Increment increaes the ref count for the given id and returns the current count
+// Increment increases the ref count for the given id and returns the current count
 func (c *RefCounter) Increment(path string) int {
-	c.mu.Lock()
-	m := c.counts[path]
-	if m == nil {
-		m = &minfo{}
-		c.counts[path] = m
-	}
-	// if we are checking this path for the first time check to make sure
-	// if it was already mounted on the system and make sure we have a correct ref
-	// count if it is mounted as it is in use.
-	if !m.check {
-		m.check = true
-		if c.checker.IsMounted(path) {
-			m.count++
-		}
-	}
-	m.count++
-	c.mu.Unlock()
-	return m.count
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count++
+	})
 }

 // Decrement decreases the ref count for the given id and returns the current count
 func (c *RefCounter) Decrement(path string) int {
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count--
+	})
+}
+
+func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
 	c.mu.Lock()
 	m := c.counts[path]
 	if m == nil {
@@ -61,7 +52,8 @@ func (c *RefCounter) Decrement(path string) int {
 			m.count++
 		}
 	}
-	m.count--
+	infoOp(m)
+	count := m.count
 	c.mu.Unlock()
-	return m.count
+	return count
 }

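For context, graph drivers use this reference counter so that only the first Get actually mounts a layer and only the last Put unmounts it. The sketch below is illustrative only; exampleDriver, its path scheme, and its mount/unmount stubs are invented and not part of this change:

package example

import graphdriver "github.com/containers/storage/drivers"

type exampleDriver struct {
	ctr *graphdriver.RefCounter
}

// mount and unmount stand in for real driver mount logic.
func (d *exampleDriver) mount(id, mnt, mountLabel string) error { return nil }
func (d *exampleDriver) unmount(mnt string) error               { return nil }

func (d *exampleDriver) Get(id, mountLabel string) (string, error) {
	mnt := "/var/lib/containers/storage/example/" + id
	// The first reference performs the real mount; later callers just share it.
	if count := d.ctr.Increment(mnt); count > 1 {
		return mnt, nil
	}
	if err := d.mount(id, mnt, mountLabel); err != nil {
		d.ctr.Decrement(mnt)
		return "", err
	}
	return mnt, nil
}

func (d *exampleDriver) Put(id string) error {
	mnt := "/var/lib/containers/storage/example/" + id
	// Only the last reference actually unmounts.
	if count := d.ctr.Decrement(mnt); count > 0 {
		return nil
	}
	return d.unmount(mnt)
}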

@ -0,0 +1,236 @@
package devmapper
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type directLVMConfig struct {
Device string
ThinpPercent uint64
ThinpMetaPercent uint64
AutoExtendPercent uint64
AutoExtendThreshold uint64
}
var (
errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
)
func validateLVMConfig(cfg directLVMConfig) error {
if reflect.DeepEqual(cfg, directLVMConfig{}) {
return nil
}
if cfg.Device == "" {
return errMissingSetupDevice
}
if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
return errThinpPercentMissing
}
if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
return errThinpPercentTooBig
}
return nil
}
func checkDevAvailable(dev string) error {
lvmScan, err := exec.LookPath("lvmdiskscan")
if err != nil {
logrus.Debug("could not find lvmdiskscan")
return nil
}
out, err := exec.Command(lvmScan).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
if !bytes.Contains(out, []byte(dev)) {
return errors.Errorf("%s is not available for use with devicemapper", dev)
}
return nil
}
func checkDevInVG(dev string) error {
pvDisplay, err := exec.LookPath("pvdisplay")
if err != nil {
logrus.Debug("could not find pvdisplay")
return nil
}
out, err := exec.Command(pvDisplay, dev).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
for scanner.Scan() {
fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
if len(fields) > 1 {
// got "VG Name" line"
vg := strings.TrimSpace(fields[1])
if len(vg) > 0 {
return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
}
logrus.Error(fields)
break
}
}
return nil
}
func checkDevHasFS(dev string) error {
blkid, err := exec.LookPath("blkid")
if err != nil {
logrus.Debug("could not find blkid")
return nil
}
out, err := exec.Command(blkid, dev).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
fields := bytes.Fields(out)
for _, f := range fields {
kv := bytes.Split(f, []byte{'='})
if bytes.Equal(kv[0], []byte("TYPE")) {
v := bytes.Trim(kv[1], "\"")
if len(v) > 0 {
return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
}
return nil
}
}
return nil
}
func verifyBlockDevice(dev string, force bool) error {
if err := checkDevAvailable(dev); err != nil {
return err
}
if err := checkDevInVG(dev); err != nil {
return err
}
if force {
return nil
}
if err := checkDevHasFS(dev); err != nil {
return err
}
return nil
}
func readLVMConfig(root string) (directLVMConfig, error) {
var cfg directLVMConfig
p := filepath.Join(root, "setup-config.json")
b, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
return cfg, nil
}
return cfg, errors.Wrap(err, "error reading existing setup config")
}
// check if this is just an empty file, no need to produce a json error later if so
if len(b) == 0 {
return cfg, nil
}
err = json.Unmarshal(b, &cfg)
return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
}
func writeLVMConfig(root string, cfg directLVMConfig) error {
p := filepath.Join(root, "setup-config.json")
b, err := json.Marshal(cfg)
if err != nil {
return errors.Wrap(err, "error marshalling direct lvm config")
}
err = ioutil.WriteFile(p, b, 0600)
return errors.Wrap(err, "error writing direct lvm config to file")
}
func setupDirectLVM(cfg directLVMConfig) error {
lvmProfileDir := "/etc/lvm/profile"
binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
for _, bin := range binaries {
if _, err := exec.LookPath(bin); err != nil {
return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
}
}
err := os.MkdirAll(lvmProfileDir, 0755)
if err != nil {
return errors.Wrap(err, "error creating lvm profile directory")
}
if cfg.AutoExtendPercent == 0 {
cfg.AutoExtendPercent = 20
}
if cfg.AutoExtendThreshold == 0 {
cfg.AutoExtendThreshold = 80
}
if cfg.ThinpPercent == 0 {
cfg.ThinpPercent = 95
}
if cfg.ThinpMetaPercent == 0 {
cfg.ThinpMetaPercent = 1
}
out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
if err != nil {
return errors.Wrap(err, "error writing storage thinp autoextend profile")
}
out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
return errors.Wrap(err, string(out))
}

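A hypothetical, same-package sketch of how the helpers in this new file might be chained when direct-lvm setup is requested; the device path and storage root are invented, and the real wiring lives in the devmapper device set code rather than in a helper like this:

func exampleDirectLVMSetup() error {
	cfg := directLVMConfig{
		Device:           "/dev/sdb", // assumption: an unused block device
		ThinpPercent:     95,
		ThinpMetaPercent: 1,
	}
	if err := validateLVMConfig(cfg); err != nil {
		return err
	}
	// force=false: refuse to wipe a device that already carries a filesystem.
	if err := verifyBlockDevice(cfg.Device, false); err != nil {
		return err
	}
	if err := setupDirectLVM(cfg); err != nil {
		return err
	}
	// Persist the configuration so later runs can detect changes.
	return writeLVMConfig("/var/lib/containers/storage/devicemapper", cfg)
}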

@@ -12,25 +12,25 @@ import (
 	"os/exec"
 	"path"
 	"path/filepath"
+	"reflect"
 	"strconv"
 	"strings"
 	"sync"
-	"syscall"
 	"time"

-	"github.com/sirupsen/logrus"
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/devicemapper"
+	"github.com/containers/storage/pkg/dmesg"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/loopback"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
-	"github.com/docker/go-units"
+	"github.com/containers/storage/pkg/parsers/kernel"
+	units "github.com/docker/go-units"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )

 var (
@@ -41,15 +41,12 @@ var (
 	defaultUdevSyncOverride = false
 	maxDeviceID             = 0xffffff // 24 bit, pool limit
 	deviceIDMapSz           = (maxDeviceID + 1) / 8
-	// We retry device removal so many a times that even error messages
-	// will fill up console during normal operation. So only log Fatal
-	// messages by default.
-	logLevel                     = devicemapper.LogLevelFatal
 	driverDeferredRemovalSupport = false
 	enableDeferredRemoval        = false
 	enableDeferredDeletion       = false
 	userBaseSize                 = false
 	defaultMinFreeSpacePercent uint32 = 10
+	lvmSetupConfigForce        bool
 )

 const deviceSetMetaFile string = "deviceset-metadata"
@ -122,6 +119,8 @@ type DeviceSet struct {
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
minFreeSpacePercent uint32 //min free space percentage in thinpool minFreeSpacePercent uint32 //min free space percentage in thinpool
xfsNospaceRetries string // max retries when xfs receives ENOSPC
lvmSetupConfig directLVMConfig
} }
// DiskUsage contains information about disk usage and is used when reporting Status of a device. // DiskUsage contains information about disk usage and is used when reporting Status of a device.
@ -170,7 +169,7 @@ type Status struct {
MinFreeSpace uint64 MinFreeSpace uint64
} }
// Structure used to export image/container metadata in docker inspect. // Structure used to export image/container metadata in inspect.
type deviceMetadata struct { type deviceMetadata struct {
deviceID int deviceID int
deviceSize uint64 // size in bytes deviceSize uint64 // size in bytes
@ -379,10 +378,7 @@ func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool {
var mask byte var mask byte
i := deviceID % 8 i := deviceID % 8
mask = (1 << uint(i)) mask = (1 << uint(i))
if (devices.deviceIDMap[deviceID/8] & mask) != 0 { return (devices.deviceIDMap[deviceID/8] & mask) == 0
return false
}
return true
} }
// Should be called with devices.Lock() held. // Should be called with devices.Lock() held.
@ -409,8 +405,8 @@ func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) {
// This function relies on that device hash map has been loaded in advance. // This function relies on that device hash map has been loaded in advance.
// Should be called with devices.Lock() held. // Should be called with devices.Lock() held.
func (devices *DeviceSet) constructDeviceIDMap() { func (devices *DeviceSet) constructDeviceIDMap() {
logrus.Debugf("devmapper: constructDeviceIDMap()") logrus.Debug("devmapper: constructDeviceIDMap()")
defer logrus.Debugf("devmapper: constructDeviceIDMap() END") defer logrus.Debug("devmapper: constructDeviceIDMap() END")
for _, info := range devices.Devices { for _, info := range devices.Devices {
devices.markDeviceIDUsed(info.DeviceID) devices.markDeviceIDUsed(info.DeviceID)
@ -458,8 +454,8 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo)
} }
func (devices *DeviceSet) loadDeviceFilesOnStart() error { func (devices *DeviceSet) loadDeviceFilesOnStart() error {
logrus.Debugf("devmapper: loadDeviceFilesOnStart()") logrus.Debug("devmapper: loadDeviceFilesOnStart()")
defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
var scan = func(path string, info os.FileInfo, err error) error { var scan = func(path string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
@ -479,11 +475,10 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {
} }
// Should be called with devices.Lock() held. // Should be called with devices.Lock() held.
func (devices *DeviceSet) unregisterDevice(id int, hash string) error { func (devices *DeviceSet) unregisterDevice(hash string) error {
logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) logrus.Debugf("devmapper: unregisterDevice(%v)", hash)
info := &devInfo{ info := &devInfo{
Hash: hash, Hash: hash,
DeviceID: id,
} }
delete(devices.Devices, hash) delete(devices.Devices, hash)
@ -528,7 +523,7 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo
// Make sure deferred removal on device is canceled, if one was // Make sure deferred removal on device is canceled, if one was
// scheduled. // scheduled.
if err := devices.cancelDeferredRemoval(info); err != nil { if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
} }
@ -539,11 +534,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo
return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
} }
// Return true only if kernel supports xfs and mkfs.xfs is available // xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
func xfsSupported() bool { func xfsSupported() error {
// Make sure mkfs.xfs is available // Make sure mkfs.xfs is available
if _, err := exec.LookPath("mkfs.xfs"); err != nil { if _, err := exec.LookPath("mkfs.xfs"); err != nil {
return false return err // error text is descriptive enough
} }
// Check if kernel supports xfs filesystem or not. // Check if kernel supports xfs filesystem or not.
@ -551,43 +546,48 @@ func xfsSupported() bool {
f, err := os.Open("/proc/filesystems") f, err := os.Open("/proc/filesystems")
if err != nil { if err != nil {
logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) return errors.Wrapf(err, "error checking for xfs support")
return false
} }
defer f.Close() defer f.Close()
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
if strings.HasSuffix(s.Text(), "\txfs") { if strings.HasSuffix(s.Text(), "\txfs") {
return true return nil
} }
} }
if err := s.Err(); err != nil { if err := s.Err(); err != nil {
logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) return errors.Wrapf(err, "error checking for xfs support")
} }
return false
return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
} }
func determineDefaultFS() string { func determineDefaultFS() string {
if xfsSupported() { err := xfsSupported()
if err == nil {
return "xfs" return "xfs"
} }
logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err)
return "ext4" return "ext4"
} }
// mkfsOptions tries to figure out whether some additional mkfs options are required
func mkfsOptions(fs string) []string {
if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) {
// For kernels earlier than 3.16 (and newer xfsutils),
// some xfs features need to be explicitly disabled.
return []string{"-m", "crc=0,finobt=0"}
}
return []string{}
}
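
xfsSupported now reports why XFS is unavailable instead of a bare boolean, and determineDefaultFS logs that reason before falling back to ext4. A standalone sketch of the same /proc/filesystems plus mkfs.<fs> probe follows, using only the standard library; fsSupported is an illustrative helper, not the package's function.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// fsSupported returns an error unless the kernel lists the filesystem in
// /proc/filesystems and its mkfs tool is on PATH.
func fsSupported(fs string) error {
	if _, err := exec.LookPath("mkfs." + fs); err != nil {
		return err // error text is descriptive enough
	}
	f, err := os.Open("/proc/filesystems")
	if err != nil {
		return fmt.Errorf("checking for %s support: %w", fs, err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		// Lines look like "nodev\tsysfs" or "\txfs"; the name is the last field.
		if strings.HasSuffix(s.Text(), "\t"+fs) {
			return nil
		}
	}
	if err := s.Err(); err != nil {
		return err
	}
	return fmt.Errorf("kernel does not support %s (or modprobe %s failed)", fs, fs)
}

func main() {
	if err := fsSupported("xfs"); err != nil {
		fmt.Println("falling back to ext4:", err)
		return
	}
	fmt.Println("xfs available")
}
```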
func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
devname := info.DevName() devname := info.DevName()
args := []string{}
for _, arg := range devices.mkfsArgs {
args = append(args, arg)
}
args = append(args, devname)
if devices.filesystem == "" { if devices.filesystem == "" {
devices.filesystem = determineDefaultFS() devices.filesystem = determineDefaultFS()
} }
@ -595,7 +595,11 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
return err return err
} }
logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) args := mkfsOptions(devices.filesystem)
args = append(args, devices.mkfsArgs...)
args = append(args, devname)
logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args)
defer func() { defer func() {
if err != nil { if err != nil {
logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
@ -833,7 +837,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
} }
if err := devices.closeTransaction(); err != nil { if err := devices.closeTransaction(); err != nil {
devices.unregisterDevice(deviceID, hash) devices.unregisterDevice(hash)
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
devices.markDeviceIDFree(deviceID) devices.markDeviceIDFree(deviceID)
return nil, err return nil, err
@ -841,11 +845,57 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
return info, nil return info, nil
} }
func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error {
if err := devices.poolHasFreeSpace(); err != nil { var (
devinfo *devicemapper.Info
err error
)
if err = devices.poolHasFreeSpace(); err != nil {
return err return err
} }
if devices.deferredRemove {
devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name())
if err != nil {
return err
}
if devinfo != nil && devinfo.DeferredRemove != 0 {
err = devices.cancelDeferredRemoval(baseInfo)
if err != nil {
// If Error is ErrEnxio. Device is probably already gone. Continue.
if errors.Cause(err) != devicemapper.ErrEnxio {
return err
}
devinfo = nil
} else {
defer devices.deactivateDevice(baseInfo)
}
}
} else {
devinfo, err = devicemapper.GetInfo(baseInfo.Name())
if err != nil {
return err
}
}
doSuspend := devinfo != nil && devinfo.Exists != 0
if doSuspend {
if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil {
return err
}
defer devicemapper.ResumeDevice(baseInfo.Name())
}
if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil {
return err
}
return nil
}
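
takeSnapshot wraps createRegisterSnapDevice so that an active base device is suspended for the duration of the snapshot (after cancelling any pending deferred removal). The ordering is the important part; the sketch below shows it with a stand-in devMapper interface rather than the real github.com/containers/storage/pkg/devicemapper API.

```go
package main

import "fmt"

type devMapper interface {
	Suspend(name string) error
	Resume(name string) error
	CreateSnap(pool string, id, baseID int) error
}

// snapshot suspends an active base device so the snapshot sees a quiesced
// filesystem, and always resumes it afterwards, even if the snap fails.
func snapshot(dm devMapper, pool, baseName string, baseID, newID int, baseActive bool) error {
	if baseActive {
		if err := dm.Suspend(baseName); err != nil {
			return err
		}
		defer dm.Resume(baseName)
	}
	return dm.CreateSnap(pool, newID, baseID)
}

type fakeDM struct{}

func (fakeDM) Suspend(n string) error                  { fmt.Println("suspend", n); return nil }
func (fakeDM) Resume(n string) error                   { fmt.Println("resume", n); return nil }
func (fakeDM) CreateSnap(p string, id, base int) error { fmt.Println("snap", p, id, base); return nil }

func main() {
	_ = snapshot(fakeDM{}, "storage-thinpool", "base", 1, 2, true)
}
```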
func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error {
deviceID, err := devices.getNextFreeDeviceID() deviceID, err := devices.getNextFreeDeviceID()
if err != nil { if err != nil {
return err return err
@ -858,7 +908,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
} }
for { for {
if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil {
if devicemapper.DeviceIDExists(err) { if devicemapper.DeviceIDExists(err) {
// Device ID already exists. This should not // Device ID already exists. This should not
// happen. Now we have a mechanism to find // happen. Now we have a mechanism to find
@ -888,7 +938,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
} }
if err := devices.closeTransaction(); err != nil { if err := devices.closeTransaction(); err != nil {
devices.unregisterDevice(deviceID, hash) devices.unregisterDevice(hash)
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
devices.markDeviceIDFree(deviceID) devices.markDeviceIDFree(deviceID)
return err return err
@ -1134,7 +1184,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
defer devices.deactivateDevice(info) defer devices.deactivateDevice(info)
fsMountPoint := "/run/containers/mnt" fsMountPoint := "/run/containers/storage/mnt"
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
if err := os.MkdirAll(fsMountPoint, 0700); err != nil { if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
return err return err
@ -1150,10 +1200,10 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
options = joinMountOptions(options, devices.mountOptions) options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
} }
defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
switch devices.BaseDeviceFilesystem { switch devices.BaseDeviceFilesystem {
case "ext4": case "ext4":
@ -1216,37 +1266,16 @@ func (devices *DeviceSet) setupBaseImage() error {
} }
func setCloseOnExec(name string) { func setCloseOnExec(name string) {
if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
for _, i := range fileInfos { for _, i := range fileInfos {
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
if link == name { if link == name {
fd, err := strconv.Atoi(i.Name()) fd, err := strconv.Atoi(i.Name())
if err == nil { if err == nil {
syscall.CloseOnExec(fd) unix.CloseOnExec(fd)
} }
} }
} }
}
}
// DMLog implements logging using DevMapperLogger interface.
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
// By default libdm sends us all the messages including debug ones.
// We need to filter out messages here and figure out which one
// should be printed.
if level > logLevel {
return
}
// FIXME(vbatts) push this back into ./pkg/devicemapper/
if level <= devicemapper.LogLevelErr {
logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
} else if level <= devicemapper.LogLevelInfo {
logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
} else {
// FIXME(vbatts) push this back into ./pkg/devicemapper/
logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}
} }
func major(device uint64) uint64 { func major(device uint64) uint64 {
@ -1356,10 +1385,7 @@ func (devices *DeviceSet) saveTransactionMetaData() error {
} }
func (devices *DeviceSet) removeTransactionMetaData() error { func (devices *DeviceSet) removeTransactionMetaData() error {
if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { return os.RemoveAll(devices.transactionMetaFile())
return err
}
return nil
} }
func (devices *DeviceSet) rollbackTransaction() error { func (devices *DeviceSet) rollbackTransaction() error {
@ -1464,12 +1490,9 @@ func (devices *DeviceSet) closeTransaction() error {
} }
func determineDriverCapabilities(version string) error { func determineDriverCapabilities(version string) error {
/* // Kernel driver version >= 4.27.0 support deferred removal
* Driver version 4.27.0 and greater support deferred activation
* feature.
*/
logrus.Debugf("devicemapper: driver version is %s", version) logrus.Debugf("devicemapper: kernel dm driver version is %s", version)
versionSplit := strings.Split(version, ".") versionSplit := strings.Split(version, ".")
major, err := strconv.Atoi(versionSplit[0]) major, err := strconv.Atoi(versionSplit[0])
@ -1505,12 +1528,13 @@ func determineDriverCapabilities(version string) error {
// Determine the major and minor number of loopback device // Determine the major and minor number of loopback device
func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
stat, err := file.Stat() var stat unix.Stat_t
err := unix.Stat(file.Name(), &stat)
if err != nil { if err != nil {
return 0, 0, err return 0, 0, err
} }
dev := stat.Sys().(*syscall.Stat_t).Rdev dev := stat.Rdev
majorNum := major(dev) majorNum := major(dev)
minorNum := minor(dev) minorNum := minor(dev)
@ -1648,36 +1672,19 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
return nil return nil
} }
func (devices *DeviceSet) initDevmapper(doInit bool) error { func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
// give ourselves to libdm as a log handler
devicemapper.LogInit(devices)
version, err := devicemapper.GetDriverVersion()
if err != nil {
// Can't even get driver version, assume not supported
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper")
}
if err := determineDriverCapabilities(version); err != nil {
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities")
}
if err := devices.enableDeferredRemovalDeletion(); err != nil { if err := devices.enableDeferredRemovalDeletion(); err != nil {
return err return err
} }
// https://github.com/docker/docker/issues/4036 // https://github.com/docker/docker/issues/4036
// if supported := devicemapper.UdevSetSyncSupport(true); !supported { if supported := devicemapper.UdevSetSyncSupport(true); !supported {
// if storageversion.IAmStatic == "true" { logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
// } else { if !devices.overrideUdevSyncCheck {
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") return graphdriver.ErrNotSupported
// } }
// }
// if !devices.overrideUdevSyncCheck {
// return graphdriver.ErrNotSupported
// }
// }
//create the root dir of the devmapper driver ownership to match this //create the root dir of the devmapper driver ownership to match this
//daemon's remapped root uid/gid so containers can start properly //daemon's remapped root uid/gid so containers can start properly
@ -1692,20 +1699,47 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
return err return err
} }
// Set the device prefix from the device id and inode of the container root dir prevSetupConfig, err := readLVMConfig(devices.root)
st, err := os.Stat(devices.root)
if err != nil { if err != nil {
return err
}
if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
if devices.thinPoolDevice != "" {
return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
}
if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
return errors.New("changing direct-lvm config is not supported")
}
logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
return err
}
if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
return err
}
if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
return err
}
}
devices.thinPoolDevice = "storage-thinpool"
logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
}
// Set the device prefix from the device id and inode of the storage root dir
var st unix.Stat_t
if err := unix.Stat(devices.root, &st); err != nil {
return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
} }
sysSt := st.Sys().(*syscall.Stat_t)
// "reg-" stands for "regular file". // "reg-" stands for "regular file".
// In the future we might use "dev-" for "device file", etc. // In the future we might use "dev-" for "device file", etc.
// container-maj,min[-inode] stands for: // container-maj,min[-inode] stands for:
// - Managed by container storage // - Managed by container storage
// - The target of this device is at major <maj> and minor <min> // - The target of this device is at major <maj> and minor <min>
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
// Check for the existence of the thin-pool device // Check for the existence of the thin-pool device
@ -1748,7 +1782,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
hasData := devices.hasImage("data") hasData := devices.hasImage("data")
if !doInit && !hasData { if !doInit && !hasData {
return errors.New("Loopback data file not found") return errors.New("loopback data file not found")
} }
if !hasData { if !hasData {
@ -1781,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
hasMetadata := devices.hasImage("metadata") hasMetadata := devices.hasImage("metadata")
if !doInit && !hasMetadata { if !doInit && !hasMetadata {
return errors.New("Loopback metadata file not found") return errors.New("loopback metadata file not found")
} }
if !hasMetadata { if !hasMetadata {
@ -1811,6 +1845,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
return err return err
} }
defer func() {
if retErr != nil {
err = devices.deactivatePool()
if err != nil {
logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
}
}
}()
} }
// Pool already exists and caller did not pass us a pool. That means // Pool already exists and caller did not pass us a pool. That means
@ -1857,8 +1899,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
// AddDevice adds a device and registers in the hash. // AddDevice adds a device and registers in the hash.
func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash)
defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash)
// If a deleted device exists, return error. // If a deleted device exists, return error.
baseInfo, err := devices.lookupDeviceWithLock(baseHash) baseInfo, err := devices.lookupDeviceWithLock(baseHash)
@ -1895,7 +1937,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string
return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
} }
if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { if err := devices.takeSnapshot(hash, baseInfo, size); err != nil {
return err return err
} }
@ -1975,7 +2017,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro
} }
if err == nil { if err == nil {
if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { if err := devices.unregisterDevice(info.Hash); err != nil {
return err return err
} }
// If device was already in deferred delete state that means // If device was already in deferred delete state that means
@ -1996,8 +2038,8 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro
// Issue discard only if device open count is zero. // Issue discard only if device open count is zero.
func (devices *DeviceSet) issueDiscard(info *devInfo) error { func (devices *DeviceSet) issueDiscard(info *devInfo) error {
logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash) logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash) defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
// This is a workaround for the kernel not discarding block so // This is a workaround for the kernel not discarding block so
// on the thin pool when we remove a thinp device, so we do it // on the thin pool when we remove a thinp device, so we do it
// manually. // manually.
@ -2030,7 +2072,16 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
} }
// Try to deactivate device in case it is active. // Try to deactivate device in case it is active.
if err := devices.deactivateDevice(info); err != nil { // If deferred removal is enabled and deferred deletion is disabled
// then make sure device is removed synchronously. There have been
// some cases of device being busy for short duration and we would
// rather busy wait for device removal to take care of these cases.
deferredRemove := devices.deferredRemove
if !devices.deferredDelete {
deferredRemove = false
}
if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
logrus.Debugf("devmapper: Error deactivating device: %s", err) logrus.Debugf("devmapper: Error deactivating device: %s", err)
return err return err
} }
@ -2046,8 +2097,8 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
// removal. If one wants to override that and want DeleteDevice() to fail if // removal. If one wants to override that and want DeleteDevice() to fail if
// device was busy and could not be deleted, set syncDelete=true. // device was busy and could not be deleted, set syncDelete=true.
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
info, err := devices.lookupDeviceWithLock(hash) info, err := devices.lookupDeviceWithLock(hash)
if err != nil { if err != nil {
return err return err
@ -2063,8 +2114,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
} }
func (devices *DeviceSet) deactivatePool() error { func (devices *DeviceSet) deactivatePool() error {
logrus.Debug("devmapper: deactivatePool()") logrus.Debug("devmapper: deactivatePool() START")
defer logrus.Debug("devmapper: deactivatePool END") defer logrus.Debug("devmapper: deactivatePool() END")
devname := devices.getPoolDevName() devname := devices.getPoolDevName()
devinfo, err := devicemapper.GetInfo(devname) devinfo, err := devicemapper.GetInfo(devname)
@ -2087,7 +2138,12 @@ func (devices *DeviceSet) deactivatePool() error {
} }
func (devices *DeviceSet) deactivateDevice(info *devInfo) error { func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) return devices.deactivateDeviceMode(info, devices.deferredRemove)
}
func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
var err error
logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
devinfo, err := devicemapper.GetInfo(info.Name()) devinfo, err := devicemapper.GetInfo(info.Name())
@ -2099,14 +2155,17 @@ func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
return nil return nil
} }
if devices.deferredRemove { if deferredRemove {
if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { err = devicemapper.RemoveDeviceDeferred(info.Name())
return err
}
} else { } else {
if err := devices.removeDevice(info.Name()); err != nil { err = devices.removeDevice(info.Name())
return err
} }
// This function's semantics is such that it does not return an
// error if device does not exist. So if device went away by
// the time we actually tried to remove it, do not return error.
if errors.Cause(err) != devicemapper.ErrEnxio {
return err
} }
return nil return nil
} }
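
deactivateDeviceMode now swallows ENXIO so that a device which has already disappeared does not fail deactivation. A hedged sketch of that convention, using github.com/pkg/errors (already imported in this file) and a local ErrEnxio that stands in for devicemapper.ErrEnxio:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrEnxio stands in for devicemapper.ErrEnxio ("no such device or address").
var ErrEnxio = errors.New("no such device or address")

// removeDevice pretends the device already vanished.
func removeDevice(name string) error {
	return errors.Wrapf(ErrEnxio, "removing %s", name)
}

// deactivate treats "device does not exist" as success: if it went away by
// the time we tried to remove it, there is nothing left to do.
func deactivate(name string) error {
	if err := removeDevice(name); err != nil && errors.Cause(err) != ErrEnxio {
		return err
	}
	return nil
}

func main() {
	fmt.Println(deactivate("container-8:1-12345-abc")) // <nil>
}
```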
@ -2137,41 +2196,53 @@ func (devices *DeviceSet) removeDevice(devname string) error {
return err return err
} }
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
if !devices.deferredRemove { if !devices.deferredRemove {
return nil return nil
} }
logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
if err != nil {
return err
}
if devinfo != nil && devinfo.DeferredRemove == 0 { if devinfo != nil && devinfo.DeferredRemove == 0 {
return nil return nil
} }
// Cancel deferred remove // Cancel deferred remove
for i := 0; i < 100; i++ { if err := devices.cancelDeferredRemoval(info); err != nil {
err = devicemapper.CancelDeferredRemove(info.Name()) // If Error is ErrEnxio. Device is probably already gone. Continue.
if err == nil {
break
}
if errors.Cause(err) == devicemapper.ErrEnxio {
// Device is probably already gone. Return success.
return nil
}
if errors.Cause(err) != devicemapper.ErrBusy { if errors.Cause(err) != devicemapper.ErrBusy {
return err return err
} }
}
return nil
}
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
var err error
// Cancel deferred remove
for i := 0; i < 100; i++ {
err = devicemapper.CancelDeferredRemove(info.Name())
if err != nil {
if errors.Cause(err) != devicemapper.ErrBusy {
// If we see EBUSY it may be a transient error, // If we see EBUSY it may be a transient error,
// sleep a bit a retry a few times. // sleep a bit a retry a few times.
devices.Unlock() devices.Unlock()
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
devices.Lock() devices.Lock()
continue
}
}
break
} }
return err return err
} }
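
cancelDeferredRemoval keeps only the bounded retry loop: EBUSY is treated as transient and retried with a short sleep (the device lock is dropped while sleeping), anything else stops the loop immediately. The generic pattern, with errBusy standing in for devicemapper.ErrBusy, might look like this:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errBusy = errors.New("device busy")

// retryWhileBusy retries op while it keeps failing with errBusy, sleeping
// between attempts; any other outcome (success or a different error) stops.
func retryWhileBusy(attempts int, delay time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); !errors.Is(err, errBusy) {
			break
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	calls := 0
	err := retryWhileBusy(100, 100*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errBusy
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```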
@ -2209,9 +2280,6 @@ func (devices *DeviceSet) Shutdown(home string) error {
if err != nil { if err != nil {
return err return err
} }
if p == path.Join(home, "mnt") {
return nil
}
if !info.IsDir() { if !info.IsDir() {
return nil return nil
} }
@ -2220,7 +2288,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
// We use MNT_DETACH here in case it is still busy in some running // We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly, // container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies. // and the device will be released when that container dies.
if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
} }
} }
@ -2263,6 +2331,34 @@ func (devices *DeviceSet) Shutdown(home string) error {
return nil return nil
} }
// Recent XFS changes allow changing behavior of filesystem in case of errors.
// When thin pool gets full and XFS gets ENOSPC error, currently it tries
// IO infinitely and sometimes it can block the container process
// and process can't be killed. With 0 value, XFS will not retry upon error
// and instead will shut down the filesystem.
func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
dmDevicePath, err := os.Readlink(info.DevName())
if err != nil {
return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
}
dmDeviceName := path.Base(dmDevicePath)
filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
if err != nil {
return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
}
defer maxRetriesFile.Close()
// Write the configured dm.xfs_nospace_max_retries value (e.g. 0 to fail fast)
_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
if err != nil {
return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
}
return nil
}
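
xfsSetNospaceRetries resolves the mounted dm device to its dm-N name and writes the user's dm.xfs_nospace_max_retries value into the kernel's per-filesystem ENOSPC knob. Below is a standalone sketch of the same path construction; setXFSNospaceRetries and the device path are illustrative, while /sys/fs/xfs/<dev>/error/metadata/ENOSPC/max_retries is the real kernel interface.

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// setXFSNospaceRetries resolves /dev/mapper/<name> -> dm-N and writes the
// retry count to /sys/fs/xfs/dm-N/error/metadata/ENOSPC/max_retries.
// A value of "0" makes XFS shut down the filesystem instead of retrying
// forever when the thin pool runs out of space.
func setXFSNospaceRetries(devPath, retries string) error {
	target, err := os.Readlink(devPath) // e.g. ../dm-3
	if err != nil {
		return fmt.Errorf("readlink %s: %w", devPath, err)
	}
	sysPath := path.Join("/sys/fs/xfs", path.Base(target), "error/metadata/ENOSPC/max_retries")
	f, err := os.OpenFile(sysPath, os.O_WRONLY, 0)
	if err != nil {
		return fmt.Errorf("dm.xfs_nospace_max_retries is not supported on this kernel: %w", err)
	}
	defer f.Close()
	_, err = f.WriteString(retries)
	return err
}

func main() {
	if err := setXFSNospaceRetries("/dev/mapper/example-thin-dev", "0"); err != nil {
		fmt.Println(err)
	}
}
```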
// MountDevice mounts the device if not already mounted. // MountDevice mounts the device if not already mounted.
func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
info, err := devices.lookupDeviceWithLock(hash) info, err := devices.lookupDeviceWithLock(hash)
@ -2300,7 +2396,15 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
}
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
if err := devices.xfsSetNospaceRetries(info); err != nil {
unix.Unmount(path, unix.MNT_DETACH)
devices.deactivateDevice(info)
return err
}
} }
return nil return nil
@ -2308,8 +2412,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
// UnmountDevice unmounts the device and removes it from hash. // UnmountDevice unmounts the device and removes it from hash.
func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)
info, err := devices.lookupDeviceWithLock(hash) info, err := devices.lookupDeviceWithLock(hash)
if err != nil { if err != nil {
@ -2323,16 +2427,12 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
defer devices.Unlock() defer devices.Unlock()
logrus.Debugf("devmapper: Unmount(%s)", mountPath) logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
return err return err
} }
logrus.Debug("devmapper: Unmount done") logrus.Debug("devmapper: Unmount done")
if err := devices.deactivateDevice(info); err != nil { return devices.deactivateDevice(info)
return err
}
return nil
} }
// HasDevice returns true if the device metadata exists. // HasDevice returns true if the device metadata exists.
@ -2424,8 +2524,8 @@ func (devices *DeviceSet) MetadataDevicePath() string {
} }
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
buf := new(syscall.Statfs_t) buf := new(unix.Statfs_t)
if err := syscall.Statfs(loopFile, buf); err != nil { if err := unix.Statfs(loopFile, buf); err != nil {
logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
return 0, err return 0, err
} }
@ -2534,22 +2634,25 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
minFreeSpacePercent: defaultMinFreeSpacePercent, minFreeSpacePercent: defaultMinFreeSpacePercent,
} }
// Pick up initialization settings, if any were saved before version, err := devicemapper.GetDriverVersion()
defaultsFile := path.Join(root, "defaults") if err != nil {
defaultsBytes, err := ioutil.ReadFile(defaultsFile) // Can't even get driver version, assume not supported
defaults := []string{} return nil, graphdriver.ErrNotSupported
settings := map[string]string{} }
if err == nil && len(defaultsBytes) > 0 {
defaults = strings.Split(string(defaultsBytes), "\n") if err := determineDriverCapabilities(version); err != nil {
return nil, graphdriver.ErrNotSupported
}
if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
// enable deferred stuff by default
enableDeferredDeletion = true
enableDeferredRemoval = true
} }
foundBlkDiscard := false foundBlkDiscard := false
nthOption := 0 var lvmSetupConfig directLVMConfig
for _, option := range append(defaults, options...) { for _, option := range options {
nthOption = nthOption + 1
if len(option) == 0 {
continue
}
key, val, err := parsers.ParseKeyValueOpt(option) key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil { if err != nil {
return nil, err return nil, err
@ -2637,15 +2740,78 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
} }
devices.minFreeSpacePercent = uint32(minFreeSpacePercent) devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
case "dm.xfs_nospace_max_retries":
_, err := strconv.ParseUint(val, 10, 64)
if err != nil {
return nil, err
}
devices.xfsNospaceRetries = val
case "dm.directlvm_device":
lvmSetupConfig.Device = val
case "dm.directlvm_device_force":
lvmSetupConfigForce, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
case "dm.thinp_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
}
if per >= 100 {
return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
}
lvmSetupConfig.ThinpPercent = per
case "dm.thinp_metapercent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
}
if per >= 100 {
return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
}
lvmSetupConfig.ThinpMetaPercent = per
case "dm.thinp_autoextend_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100")
}
lvmSetupConfig.AutoExtendPercent = per
case "dm.thinp_autoextend_threshold":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
}
lvmSetupConfig.AutoExtendThreshold = per
case "dm.libdm_log_level":
level, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
}
if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
}
// Register a new logging callback with the specified level.
devicemapper.LogInit(devicemapper.DefaultLogger{
Level: int(level),
})
default: default:
if nthOption > len(defaults) {
return nil, fmt.Errorf("devmapper: Unknown option %s", key) return nil, fmt.Errorf("devmapper: Unknown option %s", key)
} }
logrus.Errorf("devmapper: Unknown option %s, ignoring", key)
} }
settings[key] = val
if err := validateLVMConfig(lvmSetupConfig); err != nil {
return nil, err
} }
devices.lvmSetupConfig = lvmSetupConfig
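
Several of the new dm.* options above (dm.thinp_percent, dm.thinp_metapercent, dm.thinp_autoextend_percent, dm.thinp_autoextend_threshold) follow the same shape: strip an optional "%" suffix, parse an integer, range-check it. A simplified sketch of that pattern; the exact bounds differ slightly per option, as the hunk shows, and parsePercent is an illustrative helper.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePercent parses values like "95" or "95%" and rejects anything
// outside (0, max].
func parsePercent(name, val string, max uint64) (uint64, error) {
	per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
	if err != nil {
		return 0, fmt.Errorf("could not parse `%s=%s`: %w", name, val, err)
	}
	if per == 0 || per > max {
		return 0, fmt.Errorf("%s must be greater than 0 and at most %d", name, max)
	}
	return per, nil
}

func main() {
	fmt.Println(parsePercent("dm.thinp_percent", "95%", 99))
	fmt.Println(parsePercent("dm.thinp_autoextend_threshold", "120", 100)) // rejected
}
```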
// By default, don't do blk discard hack on raw devices, its rarely useful and is expensive // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive
if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
devices.doBlkDiscard = false devices.doBlkDiscard = false
@ -2655,15 +2821,5 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
return nil, err return nil, err
} }
// Save these settings along with the other metadata
defaults = []string{}
for key, val := range settings {
defaults = append(defaults, key+"="+val)
}
defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n")
if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil {
return nil, err
}
return devices, nil return devices, nil
} }

View file

@ -14,8 +14,10 @@ import (
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/mount"
"github.com/docker/go-units" "github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
) )
func init() { func init() {
@ -29,6 +31,7 @@ type Driver struct {
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter ctr *graphdriver.RefCounter
locker *locker.Locker
} }
// Init creates a driver with the given home and the set of options. // Init creates a driver with the given home and the set of options.
@ -48,6 +51,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
locker: locker.New(),
} }
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
@ -65,18 +69,18 @@ func (d *Driver) Status() [][2]string {
status := [][2]string{ status := [][2]string{
{"Pool Name", s.PoolName}, {"Pool Name", s.PoolName},
{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
{"Backing Filesystem", s.BaseDeviceFS}, {"Backing Filesystem", s.BaseDeviceFS},
{"Data file", s.DataFile}, {"Data file", s.DataFile},
{"Metadata file", s.MetadataFile}, {"Metadata file", s.MetadataFile},
{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, {"Data Space Used", units.HumanSize(float64(s.Data.Used))},
{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, {"Data Space Total", units.HumanSize(float64(s.Data.Total))},
{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, {"Data Space Available", units.HumanSize(float64(s.Data.Available))},
{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
@ -122,12 +126,17 @@ func (d *Driver) Cleanup() error {
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, mountLabel, storageOpt) return d.Create(id, parent, opts)
} }
// Create adds a device with a given id and the parent. // Create adds a device with a given id and the parent.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
var storageOpt map[string]string
if opts != nil {
storageOpt = opts.StorageOpt
}
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
return err return err
} }
@ -137,6 +146,8 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
// Remove removes a device with a given id, unmounts the filesystem. // Remove removes a device with a given id, unmounts the filesystem.
func (d *Driver) Remove(id string) error { func (d *Driver) Remove(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
if !d.DeviceSet.HasDevice(id) { if !d.DeviceSet.HasDevice(id) {
// Consider removing a non-existing device a no-op // Consider removing a non-existing device a no-op
// This is useful to be able to progress on container removal // This is useful to be able to progress on container removal
@ -146,19 +157,15 @@ func (d *Driver) Remove(id string) error {
// This assumes the device has been properly Get/Put:ed and thus is unmounted // This assumes the device has been properly Get/Put:ed and thus is unmounted
if err := d.DeviceSet.DeleteDevice(id, false); err != nil { if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
return err return fmt.Errorf("failed to remove device %s: %v", id, err)
} }
return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
mp := path.Join(d.home, "mnt", id)
if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) {
return err
}
return nil
} }
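
Remove (above) and Get/Put (below) now serialize per layer id via github.com/containers/storage/pkg/locker, so concurrent operations on the same id cannot race while different ids still proceed in parallel. A toy version of that per-key locking idea follows; it is far simpler than the real package, which also reference-counts and frees idle locks.

```go
package main

import (
	"fmt"
	"sync"
)

// keyLocker hands out one mutex per key; unlike pkg/locker it never frees them.
type keyLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyLocker() *keyLocker { return &keyLocker{locks: map[string]*sync.Mutex{}} }

func (l *keyLocker) Lock(id string) {
	l.mu.Lock()
	m, ok := l.locks[id]
	if !ok {
		m = &sync.Mutex{}
		l.locks[id] = m
	}
	l.mu.Unlock()
	m.Lock()
}

func (l *keyLocker) Unlock(id string) {
	l.mu.Lock()
	m := l.locks[id]
	l.mu.Unlock()
	m.Unlock()
}

func main() {
	l := newKeyLocker()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			l.Lock("layer1") // all three workers serialize on the same id
			defer l.Unlock("layer1")
			fmt.Println("worker", i, "holds layer1")
		}(i)
	}
	wg.Wait()
}
```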
// Get mounts a device with given id into the root filesystem // Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Get(id, mountLabel string) (string, error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id) mp := path.Join(d.home, "mnt", id)
rootFs := path.Join(mp, "rootfs") rootFs := path.Join(mp, "rootfs")
if count := d.ctr.Increment(mp); count > 1 { if count := d.ctr.Increment(mp); count > 1 {
@ -209,6 +216,8 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// Put unmounts a device and removes it. // Put unmounts a device and removes it.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id) mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Decrement(mp); count > 0 { if count := d.ctr.Decrement(mp); count > 0 {
return nil return nil
@ -227,6 +236,5 @@ func (d *Driver) Exists(id string) bool {
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
var imageStores []string return nil
return imageStores
} }

View file

@ -7,7 +7,8 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"syscall"
"golang.org/x/sys/unix"
) )
// FIXME: this is copy-pasted from the aufs driver. // FIXME: this is copy-pasted from the aufs driver.
@ -15,19 +16,17 @@ import (
// Mounted returns true if a mount point exists. // Mounted returns true if a mount point exists.
func Mounted(mountpoint string) (bool, error) { func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint) var mntpointSt unix.Stat_t
if err != nil { if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return false, nil return false, nil
} }
return false, err return false, err
} }
parent, err := os.Stat(filepath.Join(mountpoint, "..")) var parentSt unix.Stat_t
if err != nil { if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
return false, err return false, err
} }
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return mntpointSt.Dev != parentSt.Dev, nil return mntpointSt.Dev != parentSt.Dev, nil
} }
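
Mounted in the devmapper driver keeps the same test as before, now expressed via golang.org/x/sys/unix: a directory is a mountpoint when its st_dev differs from its parent's. A runnable usage sketch (the path is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// isMountpoint reports whether a filesystem is mounted on p by comparing
// its device number with its parent directory's.
func isMountpoint(p string) (bool, error) {
	var self, parent unix.Stat_t
	if err := unix.Stat(p, &self); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	if err := unix.Stat(filepath.Join(p, ".."), &parent); err != nil {
		return false, err
	}
	return self.Dev != parent.Dev, nil
}

func main() {
	mounted, err := isMountpoint("/proc")
	fmt.Println(mounted, err)
}
```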

View file

@ -29,12 +29,19 @@ var (
// ErrNotSupported returned when driver is not supported. // ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported") ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites retuned when driver does not meet prerequisites. // ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported. // ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
) )
//CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string
StorageOpt map[string]string
}
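
Create and CreateReadWrite now take a single *CreateOpts instead of separate mountLabel and storageOpt arguments, which is what forces the signature changes in every driver above. A small sketch of the new calling convention; the types below are local copies for illustration, not imports of the graphdriver package, and the label and size values are made up.

```go
package main

import "fmt"

// CreateOpts mirrors the struct introduced above.
type CreateOpts struct {
	MountLabel string
	StorageOpt map[string]string
}

// create stands in for ProtoDriver.Create(id, parent string, opts *CreateOpts).
func create(id, parent string, opts *CreateOpts) error {
	if opts == nil {
		opts = &CreateOpts{} // opts may be nil, per the interface comment
	}
	fmt.Printf("create %s (parent %q) label=%q size=%q\n",
		id, parent, opts.MountLabel, opts.StorageOpt["size"])
	return nil
}

func main() {
	_ = create("layer1", "", &CreateOpts{
		MountLabel: "system_u:object_r:container_file_t:s0",
		StorageOpt: map[string]string{"size": "10G"},
	})
	_ = create("layer2", "layer1", nil)
}
```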
// InitFunc initializes the storage driver. // InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
@ -48,11 +55,13 @@ type ProtoDriver interface {
// String returns a string representation of this driver. // String returns a string representation of this driver.
String() string String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready // CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container. // to be used as the storage for a container. Additional options can
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error // be passed in opts. parent may be "" and opts may be nil.
CreateReadWrite(id, parent string, opts *CreateOpts) error
// Create creates a new, empty, filesystem layer with the // Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "". // specified id and parent and options passed in opts. Parent
Create(id, parent, mountLabel string, storageOpt map[string]string) error // may be "" and opts may be nil.
Create(id, parent string, opts *CreateOpts) error
// Remove attempts to remove the filesystem layer with this id. // Remove attempts to remove the filesystem layer with this id.
Remove(id string) error Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred // Get returns the mountpoint for the layered filesystem referred
@ -79,9 +88,8 @@ type ProtoDriver interface {
AdditionalImageStores() []string AdditionalImageStores() []string
} }
// Driver is the interface for layered/snapshot file system drivers. // DiffDriver is the interface to use to implement graph diffs
type Driver interface { type DiffDriver interface {
ProtoDriver
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
Diff(id, parent string) (io.ReadCloser, error) Diff(id, parent string) (io.ReadCloser, error)
@ -99,6 +107,29 @@ type Driver interface {
DiffSize(id, parent string) (size int64, err error) DiffSize(id, parent string) (size int64, err error)
} }
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
DiffDriver
}
// Capabilities defines a list of capabilities a driver may implement.
// These capabilities are not required; however, they do determine how a
// graphdriver can be used.
type Capabilities struct {
// Flags that this driver is capable of reproducing exactly equivalent
// diffs for read-only layers. If set, clients can rely on the driver
// for consistent tar streams, and avoid extra processing to account
// for potential differences (eg: the layer store's use of tar-split).
ReproducesExactDiffs bool
}
// CapabilityDriver is the interface for layered file system drivers that
// can report on their Capabilities.
type CapabilityDriver interface {
Capabilities() Capabilities
}
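
CapabilityDriver is optional: callers discover it with a type assertion and fall back to conservative defaults otherwise. A self-contained sketch of that probe, using local stand-in types rather than the graphdriver package:

```go
package main

import "fmt"

type Capabilities struct{ ReproducesExactDiffs bool }

type Driver interface{ String() string }

type CapabilityDriver interface{ Capabilities() Capabilities }

type exactDriver struct{}

func (exactDriver) String() string             { return "exact" }
func (exactDriver) Capabilities() Capabilities { return Capabilities{ReproducesExactDiffs: true} }

// reproducesExactDiffs asks the driver for the capability if it implements
// the optional interface, and assumes "no" otherwise.
func reproducesExactDiffs(d Driver) bool {
	if cd, ok := d.(CapabilityDriver); ok {
		return cd.Capabilities().ReproducesExactDiffs
	}
	return false
}

func main() {
	fmt.Println(reproducesExactDiffs(exactDriver{})) // true
}
```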
// DiffGetterDriver is the interface for layered file system drivers that // DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split. // provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface { type DiffGetterDriver interface {
@ -137,15 +168,13 @@ func Register(name string, initFunc InitFunc) error {
} }
// GetDriver initializes and returns the registered driver // GetDriver initializes and returns the registered driver
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { func GetDriver(name string, config Options) (Driver, error) {
if initFunc, exists := drivers[name]; exists { if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
} }
if pluginDriver, err := lookupPlugin(name, home, options); err == nil {
return pluginDriver, nil logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
} return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home)
} }
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
@ -157,15 +186,24 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id
return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
} }
// Options is used to initialize a graphdriver
type Options struct {
Root string
DriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ExperimentalEnabled bool
}
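
GetDriver and New now take an Options struct instead of a positional argument list. A hedged usage sketch against the package as it stands after this change, using the import path and package alias seen in the files above; the root path and storage option are illustrative values only.

```go
package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	opts := graphdriver.Options{
		Root:          "/var/lib/containers/storage",
		DriverOptions: []string{"dm.xfs_nospace_max_retries=0"},
	}
	driver, err := graphdriver.New("devicemapper", opts)
	if err != nil {
		fmt.Println("driver init failed:", err)
		return
	}
	fmt.Println("using driver:", driver.String())
}
```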
// New creates the driver and initializes it at the specified root. // New creates the driver and initializes it at the specified root.
func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { func New(name string, config Options) (Driver, error) {
if name != "" { if name != "" {
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
return GetDriver(name, root, options, uidMaps, gidMaps) return GetDriver(name, config)
} }
// Guess for prior driver // Guess for prior driver
driversMap := scanPriorDrivers(root) driversMap := scanPriorDrivers(config.Root)
for _, name := range priority { for _, name := range priority {
if name == "vfs" { if name == "vfs" {
// don't use vfs even if there is state present. // don't use vfs even if there is state present.
@ -174,13 +212,13 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
if _, prior := driversMap[name]; prior { if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority // of the state found from prior drivers, check in order of our priority
// which we would prefer // which we would prefer
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
if err != nil { if err != nil {
// unlike below, we will return error here, because there is prior // unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so // state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's // something changed and needs attention. Otherwise the daemon's
// images would just "disappear". // images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
return nil, err return nil, err
} }
@ -192,17 +230,17 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
driversSlice = append(driversSlice, name) driversSlice = append(driversSlice, name)
} }
return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", ")) return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
} }
logrus.Infof("[graphdriver] using prior storage driver %q", name) logrus.Infof("[graphdriver] using prior storage driver: %s", name)
return driver, nil return driver, nil
} }
} }
// Check for priority drivers first // Check for priority drivers first
for _, name := range priority { for _, name := range priority {
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
if err != nil { if err != nil {
if isDriverNotSupported(err) { if isDriverNotSupported(err) {
continue continue
@ -214,7 +252,7 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
// Check all registered drivers if no priority driver is found // Check all registered drivers if no priority driver is found
for name, initFunc := range drivers { for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
if err != nil { if err != nil {
if isDriverNotSupported(err) { if isDriverNotSupported(err) {
continue continue

View file

@ -1,6 +1,10 @@
package graphdriver package graphdriver
import "syscall" import (
"syscall"
"golang.org/x/sys/unix"
)
var ( var (
// Slice of drivers that should be used in an order // Slice of drivers that should be used in an order
@ -11,7 +15,7 @@ var (
// Mounted checks if the given path is mounted as the fs type // Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) { func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t var buf unix.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil { if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err return false, err
} }

View file

@ -4,9 +4,9 @@ package graphdriver
import ( import (
"path/filepath" "path/filepath"
"syscall"
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/mount"
"golang.org/x/sys/unix"
) )
const ( const (
@ -66,13 +66,14 @@ var (
FsMagicAufs: "aufs", FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs", FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs", FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
FsMagicExtfs: "extfs", FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs", FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs", FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2", FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs", FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs", FsMagicNfsFs: "nfs",
FsMagicOverlay: "overlay", FsMagicOverlay: "overlayfs",
FsMagicRAMFs: "ramfs", FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs", FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb", FsMagicSmbFs: "smb",
@ -87,14 +88,14 @@ var (
// GetFSMagic returns the filesystem id given the path. // GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) { func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t var buf unix.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err return 0, err
} }
return FsMagic(buf.Type), nil return FsMagic(buf.Type), nil
} }
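
A short sketch of how the returned magic number is turned into a readable filesystem name, the way overlay's Init records its "Backing Filesystem" status; it assumes the package's exported FsNames map (the var block shown above) and uses a placeholder path.

package main

import (
    "fmt"

    graphdriver "github.com/containers/storage/drivers"
)

func main() {
    magic, err := graphdriver.GetFSMagic("/var/lib/containers/storage")
    if err != nil {
        panic(err)
    }
    name := "unknown"
    if n, ok := graphdriver.FsNames[magic]; ok {
        name = n
    }
    fmt.Println("backing filesystem:", name)
}
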
// NewFsChecker returns a checker configured for the provied FsMagic // NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker { func NewFsChecker(t FsMagic) Checker {
return &fsChecker{ return &fsChecker{
t: t, t: t,
@ -126,8 +127,8 @@ func (c *defaultChecker) IsMounted(path string) bool {
// Mounted checks if the given path is mounted as the fs type // Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) { func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t var buf unix.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil { if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err return false, err
} }
return FsMagic(buf.Type) == fsType, nil return FsMagic(buf.Type) == fsType, nil

View file

@ -19,8 +19,8 @@ import (
"path/filepath" "path/filepath"
"unsafe" "unsafe"
"github.com/pkg/errors" "github.com/containers/storage/pkg/mount"
log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
const ( const (
@ -45,22 +45,52 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil return 0, nil
} }
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
// No-op on Solaris.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type // Mounted checks if the given path is mounted as the fs type
//Solaris supports only ZFS for now //Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) { func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath)) cs := C.CString(filepath.Dir(mountPath))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs) buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) { (buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
C.free(unsafe.Pointer(buf)) return false, ErrPrerequisites
return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath)
} }
C.free(unsafe.Pointer(buf))
C.free(unsafe.Pointer(cs))
return true, nil return true, nil
} }

View file

@ -36,25 +36,25 @@ type NaiveDiffDriver struct {
// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) // ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error) // DiffSize(id, parent string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
gdw := &NaiveDiffDriver{ return &NaiveDiffDriver{ProtoDriver: driver,
ProtoDriver: driver,
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps}
}
return gdw
} }
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
layerFs, err := gdw.Get(id, "") startTime := time.Now()
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
gdw.Put(id) driver.Put(id)
} }
}() }()
@ -65,16 +65,16 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
} }
return ioutils.NewReadCloserWrapper(archive, func() error { return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close() err := archive.Close()
gdw.Put(id) driver.Put(id)
return err return err
}), nil }), nil
} }
parentFs, err := gdw.Get(parent, "") parentFs, err := driver.Get(parent, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer gdw.Put(parent) defer driver.Put(parent)
changes, err := archive.ChangesDirs(layerFs, parentFs) changes, err := archive.ChangesDirs(layerFs, parentFs)
if err != nil { if err != nil {
@ -88,7 +88,13 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
return ioutils.NewReadCloserWrapper(archive, func() error { return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close() err := archive.Close()
gdw.Put(id) driver.Put(id)
// NaiveDiffDriver compares file metadata with parent layers. Parent layers
// are extracted from tars with full second precision on modified time.
// We need this hack here to make sure calls within the same second receive
// the correct result.
time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
return err return err
}), nil }), nil
} }
@ -96,20 +102,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
layerFs, err := gdw.Get(id, "") driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer gdw.Put(id) defer driver.Put(id)
parentFs := "" parentFs := ""
if parent != "" { if parent != "" {
parentFs, err = gdw.Get(parent, "") parentFs, err = driver.Get(parent, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer gdw.Put(parent) defer driver.Put(parent)
} }
return archive.ChangesDirs(layerFs, parentFs) return archive.ChangesDirs(layerFs, parentFs)
@ -119,12 +127,14 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
// layer with the specified id and parent, returning the size of the // layer with the specified id and parent, returning the size of the
// new layer in bytes. // new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
driver := gdw.ProtoDriver
// Mount the root filesystem so we can apply the diff/layer. // Mount the root filesystem so we can apply the diff/layer.
layerFs, err := gdw.Get(id, "") layerFs, err := driver.Get(id, "")
if err != nil { if err != nil {
return return
} }
defer gdw.Put(id) defer driver.Put(id)
options := &archive.TarOptions{UIDMaps: gdw.uidMaps, options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps} GIDMaps: gdw.gidMaps}
@ -142,16 +152,18 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
// and its parent and returns the size in bytes of the changes // and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory. // relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
driver := gdw.ProtoDriver
changes, err := gdw.Changes(id, parent) changes, err := gdw.Changes(id, parent)
if err != nil { if err != nil {
return return
} }
layerFs, err := gdw.Get(id, "") layerFs, err := driver.Get(id, "")
if err != nil { if err != nil {
return return
} }
defer gdw.Put(id) defer driver.Put(id)
return archive.ChangesSize(layerFs, changes), nil return archive.ChangesSize(layerFs, changes), nil
} }

View file

@ -0,0 +1,79 @@
// +build linux
package overlay
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// hasOpaqueCopyUpBug checks whether the filesystem has a bug
// which copies up the opaque flag when copying up an opaque
// directory. When this bug exists, naive diff should be used.
func hasOpaqueCopyUpBug(d string) error {
td, err := ioutil.TempDir(d, "opaque-bug-check")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
// Make directories l1/d, l2/d, l3, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
return err
}
// Mark l2/d as opaque
if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
return errors.Wrap(err, "failed to set opaque flag on middle layer")
}
opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
return errors.Wrap(err, "failed to write to merged directory")
}
// Check l3/d does not have opaque flag
xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque")
if err != nil {
return errors.Wrap(err, "failed to read opaque flag on upper layer")
}
if string(xattrOpaque) == "y" {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
}
return nil
}
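
A fragment, assuming the overlay package context (home, logrus, and useNaiveDiffOnly as defined in overlay.go in this same change): the check is intended to run once against the driver home, and on affected kernels the driver simply falls back to the naive diff path.

if err := hasOpaqueCopyUpBug(home); err != nil {
    // Affected kernel: disable the native overlay diff path.
    logrus.Warnf("not using native diff for overlay: %v", err)
    useNaiveDiffOnly = true
}
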

View file

@ -9,9 +9,9 @@ import (
"fmt" "fmt"
"os" "os"
"runtime" "runtime"
"syscall"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
"golang.org/x/sys/unix"
) )
func init() { func init() {
@ -31,12 +31,12 @@ type mountOptions struct {
Flag uint32 Flag uint32
} }
func mountFrom(dir, device, target, mType, label string) error { func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
options := &mountOptions{ options := &mountOptions{
Device: device, Device: device,
Target: target, Target: target,
Type: mType, Type: mType,
Flag: 0, Flag: uint32(flags),
Label: label, Label: label,
} }
@ -51,16 +51,18 @@ func mountFrom(dir, device, target, mType, label string) error {
cmd.Stderr = output cmd.Stderr = output
if err := cmd.Start(); err != nil { if err := cmd.Start(); err != nil {
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
} }
// write the options to the pipe for the re-exec'd mount command to read // write the options to the pipe for the re-exec'd mount command to read
if err := json.NewEncoder(w).Encode(options); err != nil { if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
} }
w.Close() w.Close()
if err := cmd.Wait(); err != nil { if err := cmd.Wait(); err != nil {
return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
} }
return nil return nil
} }
@ -80,7 +82,7 @@ func mountFromMain() {
fatal(err) fatal(err)
} }
if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
fatal(err) fatal(err)
} }

View file

@ -13,21 +13,26 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"syscall" "sync"
"github.com/sirupsen/logrus"
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fsutils"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/parsers/kernel" "github.com/containers/storage/pkg/parsers/kernel"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
var ( var (
@ -77,6 +82,12 @@ const (
idLength = 26 idLength = 26
) )
type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
quota quota.Quota
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver. // Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct { type Driver struct {
name string name string
@ -84,20 +95,31 @@ type Driver struct {
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter ctr *graphdriver.RefCounter
opts *overlayOptions quotaCtl *quota.Control
options overlayOptions
naiveDiff graphdriver.DiffDriver
supportsDType bool
locker *locker.Locker
} }
var backingFs = "<unknown>" var (
backingFs = "<unknown>"
projectQuotaSupported = false
useNaiveDiffLock sync.Once
useNaiveDiffOnly bool
)
func init() { func init() {
graphdriver.Register("overlay", InitAsOverlay) graphdriver.Register("overlay", Init)
graphdriver.Register("overlay2", InitAsOverlay2) graphdriver.Register("overlay2", Init)
} }
// InitWithName returns a naive diff driver for the overlay filesystem, // Init returns a native diff driver for the overlay filesystem.
// which returns the passed-in name when asked which driver it is. // If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as an error.
func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { // If an overlay filesystem is not supported over an existing filesystem, graphdriver.ErrIncompatibleFS is returned.
opts, err := parseOptions(name, options) func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
opts, err := parseOptions(options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -115,7 +137,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
if !opts.overrideKernelCheck { if !opts.overrideKernelCheck {
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
} }
logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
} }
fsMagic, err := graphdriver.GetFSMagic(home) fsMagic, err := graphdriver.GetFSMagic(home)
@ -128,9 +150,19 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic { switch fsMagic {
case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs) logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
case graphdriver.FsMagicBtrfs:
// Support for OverlayFS on BTRFS was added in kernel 4.7
// See https://btrfs.wiki.kernel.org/index.php/Changelog
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs)
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs)
}
logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update")
}
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -146,38 +178,47 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
return nil, err return nil, err
} }
supportsDType, err := fsutils.SupportsDType(home)
if err != nil {
return nil, err
}
if !supportsDType {
logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
// TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
// return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
}
d := &Driver{ d := &Driver{
name: name, name: "overlay",
home: home, home: home,
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
opts: opts, supportsDType: supportsDType,
locker: locker.New(),
options: *opts,
} }
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)
if backingFs == "xfs" {
// Try to enable project quota support over xfs.
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
} else if opts.quota.Size > 0 {
return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
}
} else if opts.quota.Size > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
return d, nil return d, nil
} }
// InitAsOverlay returns the a naive diff driver for overlay filesystem. func parseOptions(options []string) (*overlayOptions, error) {
// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
func InitAsOverlay(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return InitWithName("overlay", home, options, uidMaps, gidMaps)
}
// InitAsOverlay2 returns the a naive diff driver for overlay filesystem.
// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return InitWithName("overlay2", home, options, uidMaps, gidMaps)
}
type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
}
func parseOptions(name string, options []string) (*overlayOptions, error) {
o := &overlayOptions{} o := &overlayOptions{}
for _, option := range options { for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option) key, val, err := parsers.ParseKeyValueOpt(option)
@ -187,28 +228,37 @@ func parseOptions(name string, options []string) (*overlayOptions, error) {
key = strings.ToLower(key) key = strings.ToLower(key)
switch key { switch key {
case "overlay.override_kernel_check", "overlay2.override_kernel_check": case "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: overide_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val) o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil { if err != nil {
return nil, err return nil, err
} }
case "overlay.size", "overlay2.size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
o.quota.Size = uint64(size)
case "overlay.imagestore", "overlay2.imagestore": case "overlay.imagestore", "overlay2.imagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths // Additional read only image stores to use for lower paths
for _, store := range strings.Split(val, ",") { for _, store := range strings.Split(val, ",") {
store = filepath.Clean(store) store = filepath.Clean(store)
if !filepath.IsAbs(store) { if !filepath.IsAbs(store) {
return nil, fmt.Errorf("%s: image path %q is not absolute. Can not be relative", name, store) return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store)
} }
st, err := os.Stat(store) st, err := os.Stat(store)
if err != nil { if err != nil {
return nil, fmt.Errorf("%s: Can't stat imageStore dir %s: %v", name, store, err) return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
} }
if !st.IsDir() { if !st.IsDir() {
return nil, fmt.Errorf("%s: image path %q must be a directory", name, store) return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
} }
o.imageStores = append(o.imageStores, store) o.imageStores = append(o.imageStores, store)
} }
default: default:
return nil, fmt.Errorf("%s: Unknown option %s", name, key) return nil, fmt.Errorf("overlay: Unknown option %s", key)
} }
} }
return o, nil return o, nil
@ -235,6 +285,16 @@ func supportsOverlay() error {
return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
} }
func useNaiveDiff(home string) bool {
useNaiveDiffLock.Do(func() {
if err := hasOpaqueCopyUpBug(home); err != nil {
logrus.Warnf("Not using native diff for overlay: %v", err)
useNaiveDiffOnly = true
}
})
return useNaiveDiffOnly
}
func (d *Driver) String() string { func (d *Driver) String() string {
return d.name return d.name
} }
@ -244,6 +304,8 @@ func (d *Driver) String() string {
func (d *Driver) Status() [][2]string { func (d *Driver) Status() [][2]string {
return [][2]string{ return [][2]string{
{"Backing Filesystem", backingFs}, {"Backing Filesystem", backingFs},
{"Supports d_type", strconv.FormatBool(d.supportsDType)},
{"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))},
} }
} }
@ -281,18 +343,39 @@ func (d *Driver) Cleanup() error {
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, mountLabel, storageOpt) if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported {
return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option")
}
if opts == nil {
opts = &graphdriver.CreateOpts{
StorageOpt: map[string]string{},
}
}
if _, ok := opts.StorageOpt["size"]; !ok {
if opts.StorageOpt == nil {
opts.StorageOpt = map[string]string{}
}
opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
}
return d.create(id, parent, opts)
} }
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay. // The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
if opts != nil && len(opts.StorageOpt) != 0 {
if len(storageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt is not supported for overlay") return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
} }
}
return d.create(id, parent, opts)
}
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
dir := d.dir(id) dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
@ -313,6 +396,20 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
} }
}() }()
if opts != nil && len(opts.StorageOpt) > 0 {
driver := &Driver{}
if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
return err
}
if driver.options.quota.Size > 0 {
// Set container disk quota limit
if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
return err
}
}
}
if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
return err return err
} }
@ -352,6 +449,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
return nil return nil
} }
// Parse overlay storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
// Read size to set the disk project quota per container
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return err
}
driver.options.quota.Size = uint64(size)
default:
return fmt.Errorf("Unknown option %s", key)
}
}
return nil
}
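
A sketch of what arrives here from the caller's side, assuming d is an initialized overlay *Driver and the id strings are placeholders. The "size" value is parsed with units.RAMInBytes, so "10G" asks for a 10 GiB upper layer; per the CreateReadWrite change above, the request is rejected unless the backing filesystem is XFS mounted with project quotas.

opts := &graphdriver.CreateOpts{
    StorageOpt: map[string]string{
        "size": "10G", // parsed to 10 * 1024 * 1024 * 1024 bytes
    },
}
if err := d.CreateReadWrite("container-id", "parent-layer-id", opts); err != nil {
    return err
}
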
func (d *Driver) getLower(parent string) (string, error) { func (d *Driver) getLower(parent string) (string, error) {
parentDir := d.dir(parent) parentDir := d.dir(parent)
@ -378,11 +495,11 @@ func (d *Driver) getLower(parent string) (string, error) {
return strings.Join(lowers, ":"), nil return strings.Join(lowers, ":"), nil
} }
func (d *Driver) dir(val string) string { func (d *Driver) dir(id string) string {
newpath := path.Join(d.home, val) newpath := path.Join(d.home, id)
if _, err := os.Stat(newpath); err != nil { if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() { for _, p := range d.AdditionalImageStores() {
l := path.Join(p, d.name, val) l := path.Join(p, d.name, id)
_, err = os.Stat(l) _, err = os.Stat(l)
if err == nil { if err == nil {
return l return l
@ -412,6 +529,8 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
// Remove cleans the directories that are created for this id. // Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error { func (d *Driver) Remove(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
dir := d.dir(id) dir := d.dir(id)
lid, err := ioutil.ReadFile(path.Join(dir, "link")) lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil { if err == nil {
@ -420,14 +539,16 @@ func (d *Driver) Remove(id string) error {
} }
} }
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err return err
} }
return nil return nil
} }
// Get creates and mounts the required file system for the given id and returns the mount path. // Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (s string, err error) { func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
dir := d.dir(id) dir := d.dir(id)
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return "", err return "", err
@ -459,7 +580,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
} }
} else { } else {
lower = l lower = newpath
} }
if newlowers == "" { if newlowers == "" {
newlowers = lower newlowers = lower
@ -473,22 +594,42 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
return mergedDir, nil return mergedDir, nil
} }
defer func() { defer func() {
if err != nil { if retErr != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 { if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0) if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
}
} }
} }
}() }()
workDir := path.Join(dir, "work") workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work")) opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir)
mountLabel = label.FormatMountLabel(opts, mountLabel) mountData := label.FormatMountLabel(opts, mountLabel)
if len(mountLabel) > syscall.Getpagesize() { mount := unix.Mount
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel)) mountTarget := mergedDir
pageSize := unix.Getpagesize()
// Use relative paths and mountFrom when the mount data has exceeded
// the page size. The mount syscall fails if the mount data cannot
// fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chroot.
if len(mountData) > pageSize {
// FIXME: We need to figure out how to get this to work with additional stores
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
mountData = label.FormatMountLabel(opts, mountLabel)
if len(mountData) > pageSize {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
} }
if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil { mount = func(source string, target string, mType string, flags uintptr, label string) error {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) return mountFrom(d.home, source, target, mType, flags, label)
}
mountTarget = path.Join(id, "merged")
}
if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
} }
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
@ -507,19 +648,17 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
// Put unmounts the mount path created for the given id. // Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mountpoint := path.Join(d.dir(id), "merged") mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 { if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil return nil
} }
err := syscall.Unmount(mountpoint, 0) err := unix.Unmount(mountpoint, unix.MNT_DETACH)
if err != nil { if err != nil {
if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
// We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway }
return nil return nil
}
logrus.Debugf("Failed to unmount %s %s: %v", id, d.name, err)
}
return err
} }
// Exists checks to see if the id is already mounted. // Exists checks to see if the id is already mounted.
@ -528,8 +667,33 @@ func (d *Driver) Exists(id string) bool {
return err == nil return err == nil
} }
// isParent returns if the passed in parent is the direct parent of the passed in layer
func (d *Driver) isParent(id, parent string) bool {
lowers, err := d.getLowerDirs(id)
if err != nil {
return false
}
if parent == "" && len(lowers) > 0 {
return false
}
parentDir := d.dir(parent)
var ld string
if len(lowers) > 0 {
ld = filepath.Dir(lowers[0])
}
if ld == "" && parent == "" {
return true
}
return ld == parentDir
}
// ApplyDiff applies the new layer into a root // ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
return d.naiveDiff.ApplyDiff(id, parent, diff)
}
applyDir := d.getDiffPath(id) applyDir := d.getDiffPath(id)
logrus.Debugf("Applying tar in %s", applyDir) logrus.Debugf("Applying tar in %s", applyDir)
@ -542,7 +706,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64
return 0, err return 0, err
} }
return d.DiffSize(id, parent) return directory.Size(applyDir)
} }
func (d *Driver) getDiffPath(id string) string { func (d *Driver) getDiffPath(id string) string {
@ -555,12 +719,19 @@ func (d *Driver) getDiffPath(id string) string {
// and its parent and returns the size in bytes of the changes // and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory. // relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) { func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.DiffSize(id, parent)
}
return directory.Size(d.getDiffPath(id)) return directory.Size(d.getDiffPath(id))
} }
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.Diff(id, parent)
}
diffPath := d.getDiffPath(id) diffPath := d.getDiffPath(id)
logrus.Debugf("Tar with options on %s", diffPath) logrus.Debugf("Tar with options on %s", diffPath)
return archive.TarWithOptions(diffPath, &archive.TarOptions{ return archive.TarWithOptions(diffPath, &archive.TarOptions{
@ -574,6 +745,9 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.Changes(id, parent)
}
// Overlay doesn't have snapshots, so we need to get changes from all parent // Overlay doesn't have snapshots, so we need to get changes from all parent
// layers. // layers.
diffPath := d.getDiffPath(id) diffPath := d.getDiffPath(id)
@ -587,5 +761,5 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
return d.opts.imageStores return d.options.imageStores
} }

View file

@ -12,6 +12,7 @@ import (
"time" "time"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
// generateID creates a new random string identifier with the given length // generateID creates a new random string identifier with the given length
@ -69,7 +70,7 @@ func retryOnError(err error) bool {
case *os.PathError: case *os.PathError:
return retryOnError(err.Err) // unpack the target error return retryOnError(err.Err) // unpack the target error
case syscall.Errno: case syscall.Errno:
if err == syscall.EPERM { if err == unix.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under // EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry. // which we backoff and retry.
return true return true

View file

@ -0,0 +1,18 @@
// +build linux
package overlayutils
import (
"errors"
"fmt"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
return errors.New(msg)
}
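
An illustrative call, assuming fmt and this package are imported: the message produced for an XFS home formatted with ftype=0, as built by the function above.

err := overlayutils.ErrDTypeNotSupported("overlay", "xfs")
fmt.Println(err)
// overlay: the backing xfs filesystem is formatted without d_type support, which
// leads to incorrect behavior. Reformat the filesystem with ftype=1 to enable
// d_type support. Running without d_type is not supported.
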

View file

@ -1,32 +0,0 @@
// +build experimental
package graphdriver
import (
"fmt"
"io"
"github.com/containers/storage/pkg/plugins"
)
type pluginClient interface {
// Call calls the specified method with the specified arguments for the plugin.
Call(string, interface{}, interface{}) error
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
Stream(string, interface{}) (io.ReadCloser, error)
// SendFile calls the specified method, and passes through the IO stream
SendFile(string, io.Reader, interface{}) error
}
func lookupPlugin(name, home string, opts []string) (Driver, error) {
pl, err := plugins.Get(name, "GraphDriver")
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
proxy := &graphDriverProxy{name, c}
return proxy, proxy.Init(home, opts)
}

View file

@ -1,7 +0,0 @@
// +build !experimental
package graphdriver
func lookupPlugin(name, home string, opts []string) (Driver, error) {
return nil, ErrNotSupported
}

View file

@ -1,226 +0,0 @@
// +build experimental
package graphdriver
import (
"fmt"
"io"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
)
type graphDriverProxy struct {
name string
client pluginClient
}
type graphDriverRequest struct {
ID string `json:",omitempty"`
Parent string `json:",omitempty"`
MountLabel string `json:",omitempty"`
}
type graphDriverResponse struct {
Err string `json:",omitempty"`
Dir string `json:",omitempty"`
Exists bool `json:",omitempty"`
Status [][2]string `json:",omitempty"`
Changes []archive.Change `json:",omitempty"`
Size int64 `json:",omitempty"`
Metadata map[string]string `json:",omitempty"`
}
type graphDriverInitRequest struct {
Home string
Opts []string
}
func (d *graphDriverProxy) Init(home string, opts []string) error {
args := &graphDriverInitRequest{
Home: home,
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
args := &graphDriverRequest{
ID: id,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
}
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
}
func (d *graphDriverProxy) Metadata(id string) (map[string]string, error) {
args := &graphDriverRequest{
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Metadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Metadata, nil
}
func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
return body, nil
}
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Changes, nil
}
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}

View file

@ -0,0 +1,337 @@
// +build linux
//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
// for both xfs/ext4 for kernel version >= v4.5
//
package quota
/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef XFS_PROJ_QUOTA
#define XFS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"unsafe"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Quota limit params - currently we only control blocks hard limit
type Quota struct {
Size uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas map[string]uint32
}
// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the home directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
// echo 999:/var/lib/containers/storage/overlay >> /etc/projects
// echo storage:999 >> /etc/projid
// xfs_quota -x -c 'project -s storage' /<xfs mount point>
//
// In that case, the home directory project id will be used as a "start offset"
// and all containers will be assigned larger project ids (e.g. >= 1000).
// This is a way to prevent xfs_quota management from conflicting with containers/storage.
//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
//
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
//
minProjectID, err := getProjectID(basePath)
if err != nil {
return nil, err
}
minProjectID++
//
// create backing filesystem device node
//
backingFsBlockDev, err := makeBackingFsDev(basePath)
if err != nil {
return nil, err
}
//
// Test if filesystem supports project quotas by trying to set
// a quota on the first available project id
//
quota := Quota{
Size: 0,
}
if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
return nil, err
}
q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: make(map[string]uint32),
}
//
// get first project id to be used for next container
//
err = q.findNextProjectID(basePath)
if err != nil {
return nil, err
}
logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
return &q, nil
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
projectID, ok := q.quotas[targetPath]
if !ok {
projectID = q.nextProjectID
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
if err != nil {
return err
}
q.quotas[targetPath] = projectID
q.nextProjectID++
}
//
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
return setProjectQuota(q.backingFsBlockDev, projectID, quota)
}
// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.XFS_PROJ_QUOTA
d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
projectID, backingFsBlockDev, errno.Error())
}
return nil
}
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
projectID, ok := q.quotas[targetPath]
if !ok {
return fmt.Errorf("quota not found for path : %s", targetPath)
}
//
// get the quota limit for the container's project id
//
var d C.fs_disk_quota_t
var cs = C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
projectID, q.backingFsBlockDev, errno.Error())
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
return nil
}
// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
dir, err := openDir(targetPath)
if err != nil {
return 0, err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
}
return uint32(fsx.fsx_projid), nil
}
// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
}
return nil
}
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
files, err := ioutil.ReadDir(home)
if err != nil {
return fmt.Errorf("read directory failed : %s", home)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(home, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
}
if projid > 0 {
q.quotas[path] = projid
}
if q.nextProjectID <= projid {
q.nextProjectID = projid + 1
}
}
return nil
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("Can't open dir")
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(home, &stat); err != nil {
return "", err
}
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
// Re-create just in case someone copied the home directory over to a new device
unix.Unlink(backingFsBlockDev)
if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
}
return backingFsBlockDev, nil
}
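To make the pieces above concrete, the following is a minimal, hedged sketch of how a graphdriver could drive them end to end. The github.com/containers/storage/drivers/quota import path, the NewControl constructor (defined earlier in this file but not shown in this hunk), and the example paths are assumptions; the code only builds on Linux with cgo and requires the home directory to live on XFS mounted with project quotas enabled (prjquota).

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/drivers/quota" // assumed import path
)

func main() {
	home := "/var/lib/containers/storage/overlay" // assumed driver home on XFS with prjquota
	ctl, err := quota.NewControl(home)            // assumed constructor; creates backingFsBlockDev and scans existing project IDs
	if err != nil {
		fmt.Println("project quotas unavailable:", err)
		return
	}

	layerDir := filepath.Join(home, "example-layer") // hypothetical layer directory
	// Assign a project ID to the directory and cap it at 1 GiB.
	if err := ctl.SetQuota(layerDir, quota.Quota{Size: 1 << 30}); err != nil {
		fmt.Println("SetQuota:", err)
		return
	}

	var q quota.Quota
	if err := ctl.GetQuota(layerDir, &q); err != nil {
		fmt.Println("GetQuota:", err)
		return
	}
	fmt.Printf("hard limit for %s: %d bytes\n", layerDir, q.Size)
}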

View file

@ -8,7 +8,7 @@ import (
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
) )
@ -26,14 +26,10 @@ func init() {
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
d := &Driver{ d := &Driver{
home: home, home: home,
uidMaps: uidMaps, idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
gidMaps: gidMaps,
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) rootIDs := d.idMappings.RootPair()
if err != nil { if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
return nil, err return nil, err
} }
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
@ -45,8 +41,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct { type Driver struct {
home string home string
uidMaps []idtools.IDMap idMappings *idtools.IDMappings
gidMaps []idtools.IDMap
} }
func (d *Driver) String() string { func (d *Driver) String() string {
@ -70,29 +65,26 @@ func (d *Driver) Cleanup() error {
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, mountLabel, storageOpt) return d.Create(id, parent, opts)
} }
// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if len(storageOpt) != 0 { if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for vfs") return fmt.Errorf("--storage-opt is not supported for vfs")
} }
dir := d.dir(id) dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) rootIDs := d.idMappings.RootPair()
if err != nil { if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
return err return err
} }
if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil {
return err return err
} }
if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { labelOpts := []string{"level:s0"}
return err if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
}
opts := []string{"level:s0"}
if _, mountLabel, err := label.InitLabels(opts); err == nil {
label.SetFileLabel(dir, mountLabel) label.SetFileLabel(dir, mountLabel)
} }
if parent == "" { if parent == "" {
@ -102,10 +94,7 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
if err != nil { if err != nil {
return fmt.Errorf("%s: %s", parent, err) return fmt.Errorf("%s: %s", parent, err)
} }
if err := CopyWithTar(parentDir, dir); err != nil { return CopyWithTar(parentDir, dir)
return err
}
return nil
} }
func (d *Driver) dir(id string) string { func (d *Driver) dir(id string) string {
@ -114,10 +103,7 @@ func (d *Driver) dir(id string) string {
// Remove deletes the content from the directory for a given id. // Remove deletes the content from the directory for a given id.
func (d *Driver) Remove(id string) error { func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { return system.EnsureRemoveAll(d.dir(id))
return err
}
return nil
} }
// Get returns the directory for the given id. // Get returns the directory for the given id.
@ -146,6 +132,5 @@ func (d *Driver) Exists(id string) bool {
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
var imageStores []string return nil
return imageStores
} }

View file

@ -6,6 +6,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -16,6 +17,7 @@ import (
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
"time"
"unsafe" "unsafe"
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
@ -29,17 +31,37 @@ import (
"github.com/containers/storage/pkg/longpath" "github.com/containers/storage/pkg/longpath"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/windows"
) )
// filterDriver is an HCSShim driver type for the Windows Filter driver. // filterDriver is an HCSShim driver type for the Windows Filter driver.
const filterDriver = 1 const filterDriver = 1
var (
// mutatedFiles is a list of files that are mutated by the import process
// and must be backed up and restored.
mutatedFiles = map[string]string{
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
}
noreexec = false
)
// init registers the windows graph drivers to the register. // init registers the windows graph drivers to the register.
func init() { func init() {
graphdriver.Register("windowsfilter", InitFilter) graphdriver.Register("windowsfilter", InitFilter)
reexec.Register("storage-windows-write-layer", writeLayer) // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
// debugging issues in the re-exec codepath significantly easier.
if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
noreexec = true
} else {
reexec.Register("docker-windows-write-layer", writeLayerReexec)
}
} }
type checker struct { type checker struct {
@ -60,13 +82,22 @@ type Driver struct {
cache map[string]string cache map[string]string
} }
func isTP5OrOlder() bool {
return system.GetOSVersion().Build <= 14300
}
// InitFilter returns a new Windows storage filter driver. // InitFilter returns a new Windows storage filter driver.
func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
fsType, err := getFileSystemType(string(home[0]))
if err != nil {
return nil, err
}
if strings.ToLower(fsType) == "refs" {
return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
}
if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
}
d := &Driver{ d := &Driver{
info: hcsshim.DriverInfo{ info: hcsshim.DriverInfo{
HomeDir: home, HomeDir: home,
@ -78,6 +109,37 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
return d, nil return d, nil
} }
// win32FromHresult is a helper function to get the win32 error code from an HRESULT
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
// getFileSystemType obtains the type of a file system through GetVolumeInformation
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
func getFileSystemType(drive string) (fsType string, hr error) {
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
buf = make([]uint16, 255)
size = windows.MAX_PATH + 1
)
if len(drive) != 1 {
hr = errors.New("getFileSystemType must be called with a drive letter")
return
}
drive += `:\`
n := uintptr(unsafe.Pointer(nil))
r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
if int32(r0) < 0 {
hr = syscall.Errno(win32FromHresult(r0))
}
fsType = windows.UTF16ToString(buf)
return
}
// String returns the string representation of a driver. This should match // String returns the string representation of a driver. This should match
// the name the graph driver has been registered with. // the name the graph driver has been registered with.
func (d *Driver) String() string { func (d *Driver) String() string {
@ -91,8 +153,19 @@ func (d *Driver) Status() [][2]string {
} }
} }
// panicIfUsedByLcow does exactly what it says.
// TODO @jhowardmsft - this is a temporary measure for the bring-up of
// Linux containers on Windows. It is a failsafe to ensure that the right
// graphdriver is used.
func panicIfUsedByLcow() {
if system.LCOWSupported() {
panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode")
}
}
// Exists returns true if the given id is registered with this driver. // Exists returns true if the given id is registered with this driver.
func (d *Driver) Exists(id string) bool { func (d *Driver) Exists(id string) bool {
panicIfUsedByLcow()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return false return false
@ -106,20 +179,24 @@ func (d *Driver) Exists(id string) bool {
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.create(id, parent, mountLabel, false, storageOpt) panicIfUsedByLcow()
if opts != nil {
return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
}
return d.create(id, parent, "", false, nil)
} }
// Create creates a new read-only layer with the given id. // Create creates a new read-only layer with the given id.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
return d.create(id, parent, mountLabel, true, storageOpt) panicIfUsedByLcow()
if opts != nil {
return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
}
return d.create(id, parent, "", true, nil)
} }
func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for windows")
}
rPId, err := d.resolveID(parent) rPId, err := d.resolveID(parent)
if err != nil { if err != nil {
return err return err
@ -133,7 +210,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
var layerChain []string var layerChain []string
if rPId != "" { if rPId != "" {
parentPath, err := hcsshim.LayerMountPath(d.info, rPId) parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil { if err != nil {
return err return err
} }
@ -156,32 +233,20 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
parentPath = layerChain[0] parentPath = layerChain[0]
} }
if isTP5OrOlder() {
// Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines
// group access. This is necessary to ensure that Hyper-V containers can access the
// virtual machine data. This is not necessary post-TP5.
path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id))
if err != nil {
return err
}
// Give system and administrators full control, and VMs read, write, and execute.
// Mark these ACEs as inherited.
sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)")
if err != nil {
return err
}
err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{
Length: uint32(unsafe.Sizeof(syscall.SecurityAttributes{})),
SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])),
})
if err != nil {
return err
}
}
if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
return err return err
} }
storageOptions, err := parseStorageOpt(storageOpt)
if err != nil {
return fmt.Errorf("Failed to parse storage options - %s", err)
}
if storageOptions.size != 0 {
if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil {
return err
}
}
} }
if _, err := os.Lstat(d.dir(parent)); err != nil { if _, err := os.Lstat(d.dir(parent)); err != nil {
@ -208,16 +273,89 @@ func (d *Driver) dir(id string) string {
// Remove unmounts and removes the dir information. // Remove unmounts and removes the dir information.
func (d *Driver) Remove(id string) error { func (d *Driver) Remove(id string) error {
panicIfUsedByLcow()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return err return err
} }
os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail
return hcsshim.DestroyLayer(d.info, rID) // This retry loop is due to a bug in Windows (Internal bug #9432268)
// if GetContainers fails with ErrVmcomputeOperationInvalidState
// it is a transient error. Retry until it succeeds.
var computeSystems []hcsshim.ContainerProperties
retryCount := 0
osv := system.GetOSVersion()
for {
// Get and terminate any template VMs that are currently using the layer.
// Note: It is unfortunate that we end up in the graphdrivers Remove() call
// for both containers and images, but the logic for template VMs is only
// needed for images - specifically we are looking to see if a base layer
// is in use by a template VM as a result of having started a Hyper-V
// container at some point.
//
// We have a retry loop for ErrVmcomputeOperationInvalidState and
// ErrVmcomputeOperationAccessIsDenied as there is a race condition
// in RS1 and RS2 building during enumeration when a silo is going away
// for example under it, in HCS. AccessIsDenied added to fix 30278.
//
// TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
// using platform APIs (if available) to get this more succinctly. Also
// consider enhancing the Remove() interface to have context of why
// the remove is being called - that could improve efficiency by not
// enumerating compute systems during a remove of a container as it's
// not required.
computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
if err != nil {
if (osv.Build < 15139) &&
((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) {
if retryCount >= 500 {
break
}
retryCount++
time.Sleep(10 * time.Millisecond)
continue
}
return err
}
break
}
for _, computeSystem := range computeSystems {
if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
container, err := hcsshim.OpenContainer(computeSystem.ID)
if err != nil {
return err
}
defer container.Close()
err = container.Terminate()
if hcsshim.IsPending(err) {
err = container.Wait()
} else if hcsshim.IsAlreadyStopped(err) {
err = nil
}
if err != nil {
return err
}
}
}
layerPath := filepath.Join(d.info.HomeDir, rID)
tmpID := fmt.Sprintf("%s-removing", rID)
tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
return err
}
if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
}
return nil
} }
// Get returns the rootfs path for the id. This will mount the dir at it's given path. // Get returns the rootfs path for the id. This will mount the dir at its given path.
func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Get(id, mountLabel string) (string, error) {
panicIfUsedByLcow()
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string var dir string
@ -248,9 +386,12 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err return "", err
} }
mountPath, err := hcsshim.LayerMountPath(d.info, rID) mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
if err != nil { if err != nil {
d.ctr.Decrement(rID) d.ctr.Decrement(rID)
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Unprepare %s: %s", id, err)
}
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err) logrus.Warnf("Failed to Deactivate %s: %s", id, err)
} }
@ -273,6 +414,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// Put adds a new layer to the driver. // Put adds a new layer to the driver.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
panicIfUsedByLcow()
logrus.Debugf("WindowsGraphDriver Put() id %s", id) logrus.Debugf("WindowsGraphDriver Put() id %s", id)
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
@ -283,9 +425,15 @@ func (d *Driver) Put(id string) error {
return nil return nil
} }
d.cacheMu.Lock() d.cacheMu.Lock()
_, exists := d.cache[rID]
delete(d.cache, rID) delete(d.cache, rID)
d.cacheMu.Unlock() d.cacheMu.Unlock()
// If the cache was not populated, then the layer was left unprepared and deactivated
if !exists {
return nil
}
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err return err
} }
@ -293,7 +441,31 @@ func (d *Driver) Put(id string) error {
} }
// Cleanup ensures the information the driver stores is properly removed. // Cleanup ensures the information the driver stores is properly removed.
// We use this opportunity to cleanup any -removing folders which may be
// still left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error { func (d *Driver) Cleanup() error {
items, err := ioutil.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
// Note we don't return an error below - it's possible the files
// are locked. However, next time around after the daemon exits,
// we will likely be able to clean up successfully. Instead we log
// warnings if there are errors.
for _, item := range items {
if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
} else {
logrus.Infof("Cleaned up %s", item.Name())
}
}
}
return nil return nil
} }
@ -301,6 +473,7 @@ func (d *Driver) Cleanup() error {
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
// The layer should be mounted when calling this function // The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return return
@ -335,8 +508,9 @@ func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should be mounted when calling this function // The layer should not be mounted when calling this function.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return nil, err return nil, err
@ -346,13 +520,12 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
return nil, err return nil, err
} }
// this is assuming that the layer is unmounted if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err) logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
} }
}() }()
@ -392,7 +565,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// layer with the specified id and parent, returning the size of the // layer with the specified id and parent, returning the size of the
// new layer in bytes. // new layer in bytes.
// The layer should not be mounted when calling this function // The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
panicIfUsedByLcow()
var layerChain []string var layerChain []string
if parent != "" { if parent != "" {
rPId, err := d.resolveID(parent) rPId, err := d.resolveID(parent)
@ -403,7 +577,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error
if err != nil { if err != nil {
return 0, err return 0, err
} }
parentPath, err := hcsshim.LayerMountPath(d.info, rPId) parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -427,6 +601,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error
// and its parent and returns the size in bytes of the changes // and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory. // relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) { func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
panicIfUsedByLcow()
rPId, err := d.resolveID(parent) rPId, err := d.resolveID(parent)
if err != nil { if err != nil {
return return
@ -448,6 +623,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
// Metadata returns custom driver information. // Metadata returns custom driver information.
func (d *Driver) Metadata(id string) (map[string]string, error) { func (d *Driver) Metadata(id string) (map[string]string, error) {
panicIfUsedByLcow()
m := make(map[string]string) m := make(map[string]string)
m["dir"] = d.dir(id) m["dir"] = d.dir(id)
return m, nil return m, nil
@ -505,7 +681,48 @@ func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadClose
return archive, nil return archive, nil
} }
func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { // writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
var bcdBackup *os.File
var bcdBackupWriter *winio.BackupFileWriter
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
if err != nil {
return nil, err
}
defer func() {
cerr := bcdBackup.Close()
if err == nil {
err = cerr
}
}()
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
defer func() {
cerr := bcdBackupWriter.Close()
if err == nil {
err = cerr
}
}()
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
} else {
buf.Reset(w)
}
defer func() {
ferr := buf.Flush()
if err == nil {
err = ferr
}
}()
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
t := tar.NewReader(r) t := tar.NewReader(r)
hdr, err := t.Next() hdr, err := t.Next()
totalSize := int64(0) totalSize := int64(0)
@ -539,30 +756,7 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
buf.Reset(w) hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
// Add the Hyper-V Virtual Machine group ACE to the security descriptor
// for TP5 so that Xenons can access all files. This is not necessary
// for post-TP5 builds.
if isTP5OrOlder() {
if sddl, ok := hdr.Winheaders["sd"]; ok {
var ace string
if hdr.Typeflag == tar.TypeDir {
ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)"
} else {
ace = "(A;;0x1200a9;;;S-1-5-83-0)"
}
if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok {
logrus.Debugf("failed to add VM ACE to %s", sddl)
}
}
}
hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
ferr := buf.Flush()
if ferr != nil {
err = ferr
}
totalSize += size totalSize += size
} }
} }
@ -572,49 +766,10 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
return totalSize, nil return totalSize, nil
} }
func addAceToSddlDacl(sddl, ace string) (string, bool) {
daclStart := strings.Index(sddl, "D:")
if daclStart < 0 {
return sddl, false
}
dacl := sddl[daclStart:]
daclEnd := strings.Index(dacl, "S:")
if daclEnd < 0 {
daclEnd = len(dacl)
}
dacl = dacl[:daclEnd]
if strings.Contains(dacl, ace) {
return sddl, true
}
i := 2
for i+1 < len(dacl) {
if dacl[i] != '(' {
return sddl, false
}
if dacl[i+1] == 'A' {
break
}
i += 2
for p := 1; i < len(dacl) && p > 0; i++ {
if dacl[i] == '(' {
p++
} else if dacl[i] == ')' {
p--
}
}
}
return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true
}
// importLayer adds a new layer to the tag and graph store based on the given data. // importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
cmd := reexec.Command(append([]string{"storage-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) if !noreexec {
cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
output := bytes.NewBuffer(nil) output := bytes.NewBuffer(nil)
cmd.Stdin = layerData cmd.Stdin = layerData
cmd.Stdout = output cmd.Stdout = output
@ -629,18 +784,34 @@ func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPat
} }
return strconv.ParseInt(output.String(), 10, 64) return strconv.ParseInt(output.String(), 10, 64)
}
return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
} }
// writeLayer is the re-exec entry point for writing a layer from a tar file // writeLayerReexec is the re-exec entry point for writing a layer from a tar file
func writeLayer() { func writeLayerReexec() {
home := os.Args[1] size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
id := os.Args[2] if err != nil {
parentLayerPaths := os.Args[3:] fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
fmt.Fprint(os.Stdout, size)
}
err := func() error { // writeLayer writes a layer from a tar file.
func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
if err != nil { if err != nil {
return err return 0, err
}
if noreexec {
defer func() {
if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
// This should never happen, but just in case when in debugging mode.
// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
panic("Failed to disabled process privileges while in non re-exec mode")
}
}()
} }
info := hcsshim.DriverInfo{ info := hcsshim.DriverInfo{
@ -650,27 +821,20 @@ func writeLayer() {
w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
if err != nil { if err != nil {
return err return 0, err
} }
size, err := writeLayerFromTar(os.Stdin, w) size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
if err != nil { if err != nil {
return err return 0, err
} }
err = w.Close() err = w.Close()
if err != nil { if err != nil {
return err return 0, err
} }
fmt.Fprint(os.Stdout, size) return size, nil
return nil
}()
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
} }
// resolveID computes the layerID information based on the given id. // resolveID computes the layerID information based on the given id.
@ -686,11 +850,7 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk. // setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error { func (d *Driver) setID(id, altID string) error {
err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
if err != nil {
return err
}
return nil
} }
// getLayerChain returns the layer chain information. // getLayerChain returns the layer chain information.
@ -733,17 +893,23 @@ type fileGetCloserWithBackupPrivileges struct {
} }
func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
if backupPath, ok := mutatedFiles[filename]; ok {
return os.Open(filepath.Join(fg.path, backupPath))
}
var f *os.File var f *os.File
// Open the file while holding the Windows backup privilege. This ensures that the // Open the file while holding the Windows backup privilege. This ensures that the
// file can be opened even if the caller does not actually have access to it according // file can be opened even if the caller does not actually have access to it according
// to the security descriptor. // to the security descriptor. Also use sequential file access to avoid depleting the
// standby list - Microsoft VSO Bug Tracker #9900466
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
path := longpath.AddPrefix(filepath.Join(fg.path, filename)) path := longpath.AddPrefix(filepath.Join(fg.path, filename))
p, err := syscall.UTF16FromString(path) p, err := windows.UTF16FromString(path)
if err != nil { if err != nil {
return err return err
} }
h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
if err != nil { if err != nil {
return &os.PathError{Op: "open", Path: path, Err: err} return &os.PathError{Op: "open", Path: path, Err: err}
} }
@ -757,19 +923,10 @@ func (fg *fileGetCloserWithBackupPrivileges) Close() error {
return nil return nil
} }
type fileGetDestroyCloser struct {
storage.FileGetter
path string
}
func (f *fileGetDestroyCloser) Close() error {
// TODO: activate layers and release here?
return os.RemoveAll(f.path)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that // DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split. // contains files for the layer differences. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
panicIfUsedByLcow()
id, err := d.resolveID(id) id, err := d.resolveID(id)
if err != nil { if err != nil {
return nil, err return nil, err
@ -777,3 +934,32 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
} }
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}
type storageOptions struct {
size uint64
}
func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
options := storageOptions{}
// Read size to change the block device size per container.
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
options.size = uint64(size)
default:
return nil, fmt.Errorf("Unknown storage option: %s", key)
}
}
return &options, nil
}
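As an aside, a short hedged sketch of what the size parsing above does with a typical value; it relies only on the github.com/docker/go-units dependency already imported by this file, and the example value is illustrative.

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// A --storage-opt size=20G request is parsed with binary multipliers
	// before being handed to hcsshim.ExpandSandboxSize.
	size, err := units.RAMInBytes("20G")
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // 21474836480 (20 * 1024^3)
}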

View file

@ -10,7 +10,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"syscall"
"time" "time"
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
@ -21,6 +20,7 @@ import (
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
type zfsOptions struct { type zfsOptions struct {
@ -100,6 +100,14 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, fmt.Errorf("Failed to get root uid/guid: %v", err)
}
if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
}
if err := mount.MakePrivate(base); err != nil { if err := mount.MakePrivate(base); err != nil {
return nil, err return nil, err
} }
@ -134,8 +142,8 @@ func parseOptions(opt []string) (zfsOptions, error) {
} }
func lookupZfsDataset(rootdir string) (string, error) { func lookupZfsDataset(rootdir string) (string, error) {
var stat syscall.Stat_t var stat unix.Stat_t
if err := syscall.Stat(rootdir, &stat); err != nil { if err := unix.Stat(rootdir, &stat); err != nil {
return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
} }
wantedDev := stat.Dev wantedDev := stat.Dev
@ -145,7 +153,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
return "", err return "", err
} }
for _, m := range mounts { for _, m := range mounts {
if err := syscall.Stat(m.Mountpoint, &stat); err != nil { if err := unix.Stat(m.Mountpoint, &stat); err != nil {
logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
continue // may fail on fuse file systems continue // may fail on fuse file systems
} }
@ -213,7 +221,10 @@ func (d *Driver) Status() [][2]string {
// Metadata returns image/container metadata related to graph driver // Metadata returns image/container metadata related to graph driver
func (d *Driver) Metadata(id string) (map[string]string, error) { func (d *Driver) Metadata(id string) (map[string]string, error) {
return nil, nil return map[string]string{
"Mountpoint": d.mountPath(id),
"Dataset": d.zfsPath(id),
}, nil
} }
func (d *Driver) cloneFilesystem(name, parentName string) error { func (d *Driver) cloneFilesystem(name, parentName string) error {
@ -248,12 +259,17 @@ func (d *Driver) mountPath(id string) string {
// CreateReadWrite creates a layer that is writable for use as a container // CreateReadWrite creates a layer that is writable for use as a container
// file system. // file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, mountLabel, storageOpt) return d.Create(id, parent, opts)
} }
// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. // Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
var storageOpt map[string]string
if opts != nil {
storageOpt = opts.StorageOpt
}
err := d.create(id, parent, storageOpt) err := d.create(id, parent, storageOpt)
if err == nil { if err == nil {
return nil return nil
@ -391,22 +407,20 @@ func (d *Driver) Put(id string) error {
logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
err = mount.Unmount(mountpoint) if err := mount.Unmount(mountpoint); err != nil {
if err != nil {
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
} }
return err return nil
} }
// Exists checks to see if the cache entry exists for the given id. // Exists checks to see if the cache entry exists for the given id.
func (d *Driver) Exists(id string) bool { func (d *Driver) Exists(id string) bool {
d.Lock() d.Lock()
defer d.Unlock() defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)] == true return d.filesystemsCache[d.zfsPath(id)]
} }
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
var imageStores []string return nil
return imageStores
} }

View file

@ -3,16 +3,16 @@ package zfs
import ( import (
"fmt" "fmt"
"strings" "strings"
"syscall"
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
func checkRootdirFs(rootdir string) error { func checkRootdirFs(rootdir string) error {
var buf syscall.Statfs_t var buf unix.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil { if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err) return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
} }

View file

@ -2,16 +2,16 @@ package zfs
import ( import (
"fmt" "fmt"
"syscall"
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
) )
func checkRootdirFs(rootdir string) error { func checkRootdirFs(rootdir string) error {
var buf syscall.Statfs_t var buf unix.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil { if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err) return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
} }

View file

@ -22,24 +22,23 @@ import (
"github.com/containers/storage/drivers" "github.com/containers/storage/drivers"
"github.com/pkg/errors" "github.com/pkg/errors"
log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func checkRootdirFs(rootdir string) error { func checkRootdirFs(rootdir string) error {
cs := C.CString(filepath.Dir(rootdir)) cs := C.CString(filepath.Dir(rootdir))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs) buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) { (buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
C.free(unsafe.Pointer(buf))
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
} }
C.free(unsafe.Pointer(buf))
C.free(unsafe.Pointer(cs))
return nil return nil
} }

View file

@ -24,8 +24,9 @@ type Image struct {
// unique among images. // unique among images.
Names []string `json:"names,omitempty"` Names []string `json:"names,omitempty"`
// TopLayer is the ID of the topmost layer of the image itself. // TopLayer is the ID of the topmost layer of the image itself, if the
// Multiple images can refer to the same top layer. // image contains one or more layers. Multiple images can refer to the
// same top layer.
TopLayer string `json:"layer"` TopLayer string `json:"layer"`
// Metadata is data we keep for the convenience of the caller. It is not // Metadata is data we keep for the convenience of the caller. It is not
@ -270,6 +271,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
if _, idInUse := r.byid[id]; idInUse { if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID return nil, ErrDuplicateID
} }
names = dedupeNames(names)
for _, name := range names { for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse { if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName return nil, ErrDuplicateName
@ -326,6 +328,7 @@ func (r *imageStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() { if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
} }
names = dedupeNames(names)
if image, ok := r.lookup(id); ok { if image, ok := r.lookup(id); ok {
for _, name := range image.Names { for _, name := range image.Names {
delete(r.byname, name) delete(r.byname, name)

View file

@ -490,15 +490,20 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
if _, idInUse := r.byid[id]; idInUse { if _, idInUse := r.byid[id]; idInUse {
return nil, -1, ErrDuplicateID return nil, -1, ErrDuplicateID
} }
names = dedupeNames(names)
for _, name := range names { for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse { if _, nameInUse := r.byname[name]; nameInUse {
return nil, -1, ErrDuplicateName return nil, -1, ErrDuplicateName
} }
} }
opts := drivers.CreateOpts{
MountLabel: mountLabel,
StorageOpt: options,
}
if writeable { if writeable {
err = r.driver.CreateReadWrite(id, parent, mountLabel, options) err = r.driver.CreateReadWrite(id, parent, &opts)
} else { } else {
err = r.driver.Create(id, parent, mountLabel, options) err = r.driver.Create(id, parent, &opts)
} }
if err == nil { if err == nil {
layer = &Layer{ layer = &Layer{
@ -622,6 +627,7 @@ func (r *layerStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() { if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
} }
names = dedupeNames(names)
if layer, ok := r.lookup(id); ok { if layer, ok := r.lookup(id); ok {
for _, name := range layer.Names { for _, name := range layer.Names {
delete(r.byname, name) delete(r.byname, name)

View file

@ -0,0 +1,97 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/storage/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "storage-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "storage-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View file

@ -0,0 +1,20 @@
// +build linux
package dmesg
import (
"unsafe"
"golang.org/x/sys/unix"
)
// Dmesg returns last messages from the kernel log, up to size bytes
func Dmesg(size int) []byte {
t := uintptr(3) // SYSLOG_ACTION_READ_ALL
b := make([]byte, size)
amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
if err != 0 {
return []byte{}
}
return b[:amt]
}
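For illustration, a minimal caller for the helper above; the github.com/containers/storage/pkg/dmesg import path is an assumption, and reading the kernel log requires a Linux host with sufficient privileges.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/dmesg" // assumed import path
)

func main() {
	// Read up to 256 bytes of the most recent kernel messages.
	fmt.Printf("%s\n", dmesg.Dmesg(256))
}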

View file

@ -0,0 +1,88 @@
// +build linux
package fsutils
import (
"fmt"
"io/ioutil"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
func locateDummyIfEmpty(path string) (string, error) {
children, err := ioutil.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
if err != nil {
return "", err
}
name := dummyFile.Name()
err = dummyFile.Close()
return name, err
}
// SupportsDType returns whether the filesystem mounted on path supports d_type
func SupportsDType(path string) (bool, error) {
// locate dummy so that we have at least one dirent
dummy, err := locateDummyIfEmpty(path)
if err != nil {
return false, err
}
if dummy != "" {
defer os.Remove(dummy)
}
visited := 0
supportsDType := true
fn := func(ent *unix.Dirent) bool {
visited++
if ent.Type == unix.DT_UNKNOWN {
supportsDType = false
// stop iteration
return true
}
// continue iteration
return false
}
if err = iterateReadDir(path, fn); err != nil {
return false, err
}
if visited == 0 {
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
}
return supportsDType, nil
}
func iterateReadDir(path string, fn func(*unix.Dirent) bool) error {
d, err := os.Open(path)
if err != nil {
return err
}
defer d.Close()
fd := int(d.Fd())
buf := make([]byte, 4096)
for {
nbytes, err := unix.ReadDirent(fd, buf)
if err != nil {
return err
}
if nbytes == 0 {
break
}
for off := 0; off < nbytes; {
ent := (*unix.Dirent)(unsafe.Pointer(&buf[off]))
if stop := fn(ent); stop {
return nil
}
off += int(ent.Reclen)
}
}
return nil
}
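A hedged sketch of calling the helper above; the github.com/containers/storage/pkg/fsutils import path and the example directory are assumptions, and the check is the kind a driver might run at init time to vet its backing filesystem.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fsutils" // assumed import path
)

func main() {
	// Directory whose backing filesystem we want to vet (assumed path).
	supported, err := fsutils.SupportsDType("/var/lib/containers/storage")
	if err != nil {
		panic(err)
	}
	if !supported {
		fmt.Println("backing filesystem does not report d_type; overlay-style drivers will misbehave")
		return
	}
	fmt.Println("d_type supported")
}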

View file

@ -0,0 +1,65 @@
Locker
=====
locker provides a mechanism for creating finer-grained locking to help
free up more global locks to handle other tasks.
The implementation is close to a sync.Mutex; however, the user must provide a
name that refers to the underlying lock when locking and unlocking, and
Unlock may return an error.
If a lock with a given name does not exist when `Lock` is called, one is
created.
Lock references are automatically cleaned up on `Unlock` if nothing else is
waiting for the lock.
## Usage
```go
package important
import (
"sync"
"time"
"github.com/containers/storage/pkg/locker"
)
type important struct {
locks *locker.Locker
data map[string]interface{}
mu sync.Mutex
}
func (i *important) Get(name string) interface{} {
i.locks.Lock(name)
defer i.locks.Unlock(name)
return i.data[name]
}
func (i *important) Create(name string, data interface{}) {
i.locks.Lock(name)
defer i.locks.Unlock(name)
i.createImportant(data)
i.mu.Lock()
i.data[name] = data
i.mu.Unlock()
}
func (i *important) createImportant(data interface{}) {
time.Sleep(10 * time.Second)
}
```
For functions dealing with a given name, always lock at the beginning of the
function (or before touching the underlying state); this ensures that any
other function dealing with the same name will block.
When needing to modify the underlying data, use the global lock to ensure nothing
else is modifying it at the same time.
Since the name lock is already in place, no reads will occur while the modification
is being performed.

View file

@ -0,0 +1,112 @@
/*
Package locker provides a mechanism for creating finer-grained locking to help
free up more global locks to handle other tasks.
The implementation is close to a sync.Mutex; however, the user must provide a
name that refers to the underlying lock when locking and unlocking, and
Unlock may return an error.
If a lock with a given name does not exist when `Lock` is called, one is
created.
Lock references are automatically cleaned up on `Unlock` if nothing else is
waiting for the lock.
*/
package locker
import (
"errors"
"sync"
"sync/atomic"
)
// ErrNoSuchLock is returned when the requested lock does not exist
var ErrNoSuchLock = errors.New("no such lock")
// Locker provides a locking mechanism based on the passed in reference name
type Locker struct {
mu sync.Mutex
locks map[string]*lockCtr
}
// lockCtr is used by Locker to represent a lock with a given name.
type lockCtr struct {
mu sync.Mutex
// waiters is the number of waiters waiting to acquire the lock
// this is int32 instead of uint32 so we can add `-1` in `dec()`
waiters int32
}
// inc increments the number of waiters waiting for the lock
func (l *lockCtr) inc() {
atomic.AddInt32(&l.waiters, 1)
}
// dec decrements the number of waiters waiting on the lock
func (l *lockCtr) dec() {
atomic.AddInt32(&l.waiters, -1)
}
// count gets the current number of waiters
func (l *lockCtr) count() int32 {
return atomic.LoadInt32(&l.waiters)
}
// Lock locks the mutex
func (l *lockCtr) Lock() {
l.mu.Lock()
}
// Unlock unlocks the mutex
func (l *lockCtr) Unlock() {
l.mu.Unlock()
}
// New creates a new Locker
func New() *Locker {
return &Locker{
locks: make(map[string]*lockCtr),
}
}
// Lock locks a mutex with the given name. If it doesn't exist, one is created
func (l *Locker) Lock(name string) {
l.mu.Lock()
if l.locks == nil {
l.locks = make(map[string]*lockCtr)
}
nameLock, exists := l.locks[name]
if !exists {
nameLock = &lockCtr{}
l.locks[name] = nameLock
}
// increment the nameLock waiters while inside the main mutex
// this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
nameLock.inc()
l.mu.Unlock()
// Lock the nameLock outside the main mutex so we don't block other operations
// once locked then we can decrement the number of waiters for this lock
nameLock.Lock()
nameLock.dec()
}
// Unlock unlocks the mutex with the given name
// If the given lock is not being waited on by any other callers, it is deleted
func (l *Locker) Unlock(name string) error {
l.mu.Lock()
nameLock, exists := l.locks[name]
if !exists {
l.mu.Unlock()
return ErrNoSuchLock
}
if nameLock.count() == 0 {
delete(l.locks, name)
}
nameLock.Unlock()
l.mu.Unlock()
return nil
}
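A short, hedged usage sketch complementing the README above; it exercises only the exported API shown in this file (New, Lock, Unlock, ErrNoSuchLock) and assumes the github.com/containers/storage/pkg/locker import path.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/locker" // assumed import path
)

func main() {
	l := locker.New()

	// Serialize work on a per-image basis without one global lock.
	l.Lock("image-abc")
	// ... per-image work happens here ...
	if err := l.Unlock("image-abc"); err != nil {
		fmt.Println("unlock failed:", err)
	}

	// Unlocking a name that was never locked reports ErrNoSuchLock.
	if err := l.Unlock("never-locked"); err == locker.ErrNoSuchLock {
		fmt.Println("no such lock, as expected")
	}
}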

View file

@ -1,205 +0,0 @@
package plugins
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/containers/storage/pkg/plugins/transport"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
const (
defaultTimeOut = 30
)
func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) {
tr := &http.Transport{}
if tlsConfig != nil {
c, err := tlsconfig.Client(*tlsConfig)
if err != nil {
return nil, err
}
tr.TLSClientConfig = c
}
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
socket := u.Host
if socket == "" {
// valid local socket addresses have the host empty.
socket = u.Path
}
if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil {
return nil, err
}
scheme := httpScheme(u)
return transport.NewHTTPTransport(tr, scheme, socket), nil
}
// NewClient creates a new plugin client (http).
func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
clientTransport, err := newTransport(addr, tlsConfig)
if err != nil {
return nil, err
}
return newClientWithTransport(clientTransport, 0), nil
}
// NewClientWithTimeout creates a new plugin client (http).
func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) {
clientTransport, err := newTransport(addr, tlsConfig)
if err != nil {
return nil, err
}
return newClientWithTransport(clientTransport, timeout), nil
}
// newClientWithTransport creates a new plugin client with a given transport.
func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client {
return &Client{
http: &http.Client{
Transport: tr,
Timeout: timeout,
},
requestFactory: tr,
}
}
// Client represents a plugin client.
type Client struct {
http *http.Client // http client to use
requestFactory transport.RequestFactory
}
// Call calls the specified method with the specified arguments for the plugin.
// It will retry for 30 seconds if a failure occurs when calling.
func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
var buf bytes.Buffer
if args != nil {
if err := json.NewEncoder(&buf).Encode(args); err != nil {
return err
}
}
body, err := c.callWithRetry(serviceMethod, &buf, true)
if err != nil {
return err
}
defer body.Close()
if ret != nil {
if err := json.NewDecoder(body).Decode(&ret); err != nil {
logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
return err
}
}
return nil
}
// Stream calls the specified method with the specified arguments for the plugin and returns the response body
func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(args); err != nil {
return nil, err
}
return c.callWithRetry(serviceMethod, &buf, true)
}
// SendFile calls the specified method, and passes through the IO stream
func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {
body, err := c.callWithRetry(serviceMethod, data, true)
if err != nil {
return err
}
defer body.Close()
if err := json.NewDecoder(body).Decode(&ret); err != nil {
logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
return err
}
return nil
}
func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
var retries int
start := time.Now()
for {
req, err := c.requestFactory.NewRequest(serviceMethod, data)
if err != nil {
return nil, err
}
resp, err := c.http.Do(req)
if err != nil {
if !retry {
return nil, err
}
timeOff := backoff(retries)
if abort(start, timeOff) {
return nil, err
}
retries++
logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff)
time.Sleep(timeOff)
continue
}
if resp.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}
}
// Plugin responses should have an Err field indicating what went
// wrong. Try to unmarshal it into responseErr; otherwise fall back to
// just returning string(body).
type responseErr struct {
Err string
}
remoteErr := responseErr{}
if err := json.Unmarshal(b, &remoteErr); err == nil {
if remoteErr.Err != "" {
return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}
}
}
// old way...
return nil, &statusError{resp.StatusCode, serviceMethod, string(b)}
}
return resp.Body, nil
}
}
func backoff(retries int) time.Duration {
b, max := 1, defaultTimeOut
for b < max && retries > 0 {
b *= 2
retries--
}
if b > max {
b = max
}
return time.Duration(b) * time.Second
}
func abort(start time.Time, timeOff time.Duration) bool {
return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second
}
func httpScheme(u *url.URL) string {
scheme := u.Scheme
if scheme != "https" {
scheme = "http"
}
return scheme
}
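A minimal sketch of how this client might be driven (the socket path, method name, and payload shapes are hypothetical, not defined by this package):

// Hypothetical caller; assumes a plugin listening on the socket path shown.
func callExamplePlugin() error {
	c, err := NewClient("unix:///run/container/storage/plugins/example.sock", nil)
	if err != nil {
		return err
	}
	args := map[string]string{"Name": "data"}
	var resp struct{ Err string }
	// Call retries with exponential backoff (1s, 2s, 4s, ... capped at 30s) for roughly
	// defaultTimeOut seconds before giving up, as implemented by callWithRetry above.
	return c.Call("VolumeDriver.Mount", args, &resp)
}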

View file

@ -1,131 +0,0 @@
package plugins
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
)
var (
// ErrNotFound plugin not found
ErrNotFound = errors.New("plugin not found")
socketsPath = "/run/container/storage/plugins"
)
// localRegistry defines a registry that is local (using unix socket).
type localRegistry struct{}
func newLocalRegistry() localRegistry {
return localRegistry{}
}
// Scan scans all the plugin paths and returns all the names it found
func Scan() ([]string, error) {
var names []string
if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return nil
}
if fi.Mode()&os.ModeSocket != 0 {
name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
names = append(names, name)
}
return nil
}); err != nil {
return nil, err
}
for _, path := range specsPaths {
if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
if err != nil || fi.IsDir() {
return nil
}
name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
names = append(names, name)
return nil
}); err != nil {
return nil, err
}
}
return names, nil
}
// Plugin returns the plugin registered with the given name (or returns an error).
func (l *localRegistry) Plugin(name string) (*Plugin, error) {
socketpaths := pluginPaths(socketsPath, name, ".sock")
for _, p := range socketpaths {
if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 {
return NewLocalPlugin(name, "unix://"+p), nil
}
}
var txtspecpaths []string
for _, p := range specsPaths {
txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...)
txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...)
}
for _, p := range txtspecpaths {
if _, err := os.Stat(p); err == nil {
if strings.HasSuffix(p, ".json") {
return readPluginJSONInfo(name, p)
}
return readPluginInfo(name, p)
}
}
return nil, ErrNotFound
}
func readPluginInfo(name, path string) (*Plugin, error) {
content, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
addr := strings.TrimSpace(string(content))
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
if len(u.Scheme) == 0 {
return nil, fmt.Errorf("Unknown protocol")
}
return NewLocalPlugin(name, addr), nil
}
func readPluginJSONInfo(name, path string) (*Plugin, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var p Plugin
if err := json.NewDecoder(f).Decode(&p); err != nil {
return nil, err
}
p.name = name
if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 {
p.TLSConfig.InsecureSkipVerify = true
}
p.activateWait = sync.NewCond(&sync.Mutex{})
return &p, nil
}
func pluginPaths(base, name, ext string) []string {
return []string{
filepath.Join(base, name+ext),
filepath.Join(base, name, name+ext),
}
}
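A sketch of how this registry might be exercised (the plugin name "example" and the file locations mentioned in the comments are hypothetical):

// Hypothetical discovery sketch. A socket at /run/container/storage/plugins/example.sock,
// or a spec file such as /etc/containers/storage/plugins/example.spec containing a plain
// address like "tcp://127.0.0.1:8080", would both resolve here; sockets are preferred.
func findExample() (*Plugin, error) {
	names, err := Scan() // lists every plugin visible under the socket and spec directories
	if err != nil {
		return nil, err
	}
	_ = names
	r := newLocalRegistry()
	return r.Plugin("example")
}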

View file

@ -1,5 +0,0 @@
// +build !windows
package plugins
var specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"}

View file

@ -1,8 +0,0 @@
package plugins
import (
"os"
"path/filepath"
)
var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "containers", "storage", "plugins")}

View file

@ -1,33 +0,0 @@
package plugins
import (
"fmt"
"net/http"
)
type statusError struct {
status int
method string
err string
}
// Error returns a formatted string for this error type
func (e *statusError) Error() string {
return fmt.Sprintf("%s: %v", e.method, e.err)
}
// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin
func IsNotFound(err error) bool {
return isStatusError(err, http.StatusNotFound)
}
func isStatusError(err error, status int) bool {
if err == nil {
return false
}
e, ok := err.(*statusError)
if !ok {
return false
}
return e.status == status
}

View file

@ -1,329 +0,0 @@
// Package plugins provides structures and helper functions to manage Docker
// plugins.
//
// Docker discovers plugins by looking for them in the plugin directory whenever
// a user or container tries to use one by name. UNIX domain socket files must
// be located under /run/container/storage/plugins, whereas spec files can be located
// either under /etc/containers/storage/plugins or /usr/lib/containers/storage/plugins. This is handled
// by the Registry interface, which lets you list all plugins or get a plugin by
// its name if it exists.
//
// The plugins need to implement an HTTP server and bind this to the UNIX socket
// or the address specified in the spec files.
// A handshake is sent at /Plugin.Activate, and plugins are expected to return
// a Manifest with a list of Docker subsystems which this plugin implements.
//
// In order to use a plugin, you can use ``Get`` with the name of the
// plugin and the subsystem it implements.
//
// plugin, err := plugins.Get("example", "VolumeDriver")
// if err != nil {
// return fmt.Errorf("Error looking up volume plugin example: %v", err)
// }
package plugins
import (
"errors"
"sync"
"time"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
var (
// ErrNotImplements is returned if the plugin does not implement the requested driver.
ErrNotImplements = errors.New("Plugin does not implement the requested driver")
)
type plugins struct {
sync.Mutex
plugins map[string]*Plugin
}
type extpointHandlers struct {
sync.RWMutex
extpointHandlers map[string][]func(string, *Client)
}
var (
storage = plugins{plugins: make(map[string]*Plugin)}
handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))}
)
// Manifest lists what a plugin implements.
type Manifest struct {
// List of subsystems the plugin implements.
Implements []string
}
// Plugin is the definition of a container/storage plugin.
type Plugin struct {
// Name of the plugin
name string
// Address of the plugin
Addr string
// TLS configuration of the plugin
TLSConfig *tlsconfig.Options
// Client attached to the plugin
client *Client
// Manifest of the plugin (see above)
Manifest *Manifest `json:"-"`
// wait for activation to finish
activateWait *sync.Cond
// error produced by activation
activateErr error
// keeps track of callback handlers run against this plugin
handlersRun bool
}
// Name returns the name of the plugin.
func (p *Plugin) Name() string {
return p.name
}
// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
func (p *Plugin) Client() *Client {
return p.client
}
// IsV1 returns true for V1 plugins and false otherwise.
func (p *Plugin) IsV1() bool {
return true
}
// NewLocalPlugin creates a new local plugin.
func NewLocalPlugin(name, addr string) *Plugin {
return &Plugin{
name: name,
Addr: addr,
// TODO: change to nil
TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true},
activateWait: sync.NewCond(&sync.Mutex{}),
}
}
func (p *Plugin) activate() error {
p.activateWait.L.Lock()
if p.activated() {
p.runHandlers()
p.activateWait.L.Unlock()
return p.activateErr
}
p.activateErr = p.activateWithLock()
p.runHandlers()
p.activateWait.L.Unlock()
p.activateWait.Broadcast()
return p.activateErr
}
// runHandlers runs the registered handlers for the implemented plugin types
// This should only be run after activation, and while the activation lock is held.
func (p *Plugin) runHandlers() {
if !p.activated() {
return
}
handlers.RLock()
if !p.handlersRun {
for _, iface := range p.Manifest.Implements {
hdlrs, handled := handlers.extpointHandlers[iface]
if !handled {
continue
}
for _, handler := range hdlrs {
handler(p.name, p.client)
}
}
p.handlersRun = true
}
handlers.RUnlock()
}
// activated reports whether the plugin has already been activated.
// This should only be called with the activation lock held
func (p *Plugin) activated() bool {
return p.Manifest != nil
}
func (p *Plugin) activateWithLock() error {
c, err := NewClient(p.Addr, p.TLSConfig)
if err != nil {
return err
}
p.client = c
m := new(Manifest)
if err = p.client.Call("Plugin.Activate", nil, m); err != nil {
return err
}
p.Manifest = m
return nil
}
func (p *Plugin) waitActive() error {
p.activateWait.L.Lock()
for !p.activated() && p.activateErr == nil {
p.activateWait.Wait()
}
p.activateWait.L.Unlock()
return p.activateErr
}
func (p *Plugin) implements(kind string) bool {
if p.Manifest == nil {
return false
}
for _, driver := range p.Manifest.Implements {
if driver == kind {
return true
}
}
return false
}
func load(name string) (*Plugin, error) {
return loadWithRetry(name, true)
}
func loadWithRetry(name string, retry bool) (*Plugin, error) {
registry := newLocalRegistry()
start := time.Now()
var retries int
for {
pl, err := registry.Plugin(name)
if err != nil {
if !retry {
return nil, err
}
timeOff := backoff(retries)
if abort(start, timeOff) {
return nil, err
}
retries++
logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff)
time.Sleep(timeOff)
continue
}
storage.Lock()
if pl, exists := storage.plugins[name]; exists {
storage.Unlock()
return pl, pl.activate()
}
storage.plugins[name] = pl
storage.Unlock()
err = pl.activate()
if err != nil {
storage.Lock()
delete(storage.plugins, name)
storage.Unlock()
}
return pl, err
}
}
func get(name string) (*Plugin, error) {
storage.Lock()
pl, ok := storage.plugins[name]
storage.Unlock()
if ok {
return pl, pl.activate()
}
return load(name)
}
// Get returns the plugin given the specified name and requested implementation.
func Get(name, imp string) (*Plugin, error) {
pl, err := get(name)
if err != nil {
return nil, err
}
if err := pl.waitActive(); err == nil && pl.implements(imp) {
logrus.Debugf("%s implements: %s", name, imp)
return pl, nil
}
return nil, ErrNotImplements
}
// Handle adds the specified function to the extpointHandlers.
func Handle(iface string, fn func(string, *Client)) {
handlers.Lock()
hdlrs, ok := handlers.extpointHandlers[iface]
if !ok {
hdlrs = []func(string, *Client){}
}
hdlrs = append(hdlrs, fn)
handlers.extpointHandlers[iface] = hdlrs
storage.Lock()
for _, p := range storage.plugins {
p.activateWait.L.Lock()
if p.activated() && p.implements(iface) {
p.handlersRun = false
}
p.activateWait.L.Unlock()
}
storage.Unlock()
handlers.Unlock()
}
// GetAll returns all the plugins for the specified implementation
func GetAll(imp string) ([]*Plugin, error) {
pluginNames, err := Scan()
if err != nil {
return nil, err
}
type plLoad struct {
pl *Plugin
err error
}
chPl := make(chan *plLoad, len(pluginNames))
var wg sync.WaitGroup
for _, name := range pluginNames {
storage.Lock()
pl, ok := storage.plugins[name]
storage.Unlock()
if ok {
chPl <- &plLoad{pl, nil}
continue
}
wg.Add(1)
go func(name string) {
defer wg.Done()
pl, err := loadWithRetry(name, false)
chPl <- &plLoad{pl, err}
}(name)
}
wg.Wait()
close(chPl)
var out []*Plugin
for pl := range chPl {
if pl.err != nil {
logrus.Error(pl.err)
continue
}
if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) {
out = append(out, pl.pl)
}
}
return out, nil
}
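A minimal sketch tying Handle and Get together (the extension point "GraphDriver", the plugin name "example", and the RPC method are hypothetical):

// Hypothetical consumer; the names and the RPC method are illustrative only.
func useExamplePlugin() error {
	// Run a callback whenever a plugin advertising "GraphDriver" is activated.
	Handle("GraphDriver", func(name string, c *Client) {
		logrus.Debugf("plugin %s activated", name)
	})
	p, err := Get("example", "GraphDriver") // loads the plugin, activates it, and checks its manifest
	if err != nil {
		return err
	}
	return p.Client().Call("GraphDriver.Init", nil, nil)
}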

View file

@ -1,9 +0,0 @@
// +build !windows
package plugins
// BasePath returns the path to which all paths returned by the plugin are relative.
// For v1 plugins, this always returns the host's root directory.
func (p *Plugin) BasePath() string {
return "/"
}

View file

@ -1,8 +0,0 @@
package plugins
// BasePath returns the path to which all paths returned by the plugin are relative.
// For Windows v1 plugins, this returns an empty string, since the plugin is already aware
// of the absolute path of the mount.
func (p *Plugin) BasePath() string {
return ""
}

View file

@ -1,36 +0,0 @@
package transport
import (
"io"
"net/http"
)
// httpTransport holds an http.RoundTripper
// and information about the scheme and address the transport
// sends requests to.
type httpTransport struct {
http.RoundTripper
scheme string
addr string
}
// NewHTTPTransport creates a new httpTransport.
func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport {
return httpTransport{
RoundTripper: r,
scheme: scheme,
addr: addr,
}
}
// NewRequest creates a new http.Request and sets the URL
// scheme and address with the transport's fields.
func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) {
req, err := newHTTPRequest(path, data)
if err != nil {
return nil, err
}
req.URL.Scheme = t.scheme
req.URL.Host = t.addr
return req, nil
}

View file

@ -1,36 +0,0 @@
package transport
import (
"io"
"net/http"
"strings"
)
// VersionMimetype is the Content-Type the engine sends to plugins.
const VersionMimetype = "application/vnd.docker.plugins.v1.2+json"
// RequestFactory defines an interface that
// transports can implement to create new requests.
type RequestFactory interface {
NewRequest(path string, data io.Reader) (*http.Request, error)
}
// Transport defines an interface that plugin transports
// must implement.
type Transport interface {
http.RoundTripper
RequestFactory
}
// newHTTPRequest creates a new request with a path and a body.
func newHTTPRequest(path string, data io.Reader) (*http.Request, error) {
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
req, err := http.NewRequest("POST", path, data)
if err != nil {
return nil, err
}
req.Header.Add("Accept", VersionMimetype)
return req, nil
}
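For illustration, a request built by this transport for a hypothetical method name is a POST to /VolumeDriver.Mount with the Accept header set to VersionMimetype:

// Hypothetical request construction; the scheme, address, and method name are illustrative.
func buildExampleRequest() (*http.Request, error) {
	t := NewHTTPTransport(&http.Transport{}, "http", "example.sock")
	// Yields: POST http://example.sock/VolumeDriver.Mount
	// Accept: application/vnd.docker.plugins.v1.2+json
	return t.NewRequest("VolumeDriver.Mount", nil)
}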

View file

@ -590,7 +590,13 @@ func (s *store) getGraphDriver() (drivers.Driver, error) {
if s.graphDriver != nil { if s.graphDriver != nil {
return s.graphDriver, nil return s.graphDriver, nil
} }
driver, err := drivers.New(s.graphRoot, s.graphDriverName, s.graphOptions, s.uidMap, s.gidMap) config := drivers.Options{
Root: s.graphRoot,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
}
driver, err := drivers.New(s.graphDriverName, config)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -769,6 +775,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
id = stringid.GenerateRandomID() id = stringid.GenerateRandomID()
} }
if layer != "" {
lstore, err := s.LayerStore() lstore, err := s.LayerStore()
if err != nil { if err != nil {
return nil, err return nil, err
@ -793,6 +800,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
return nil, ErrLayerUnknown return nil, ErrLayerUnknown
} }
layer = ilayer.ID layer = ilayer.ID
}
ristore, err := s.ImageStore() ristore, err := s.ImageStore()
if err != nil { if err != nil {
@ -1168,15 +1176,20 @@ func (s *store) Exists(id string) bool {
return false return false
} }
func (s *store) SetNames(id string, names []string) error { func dedupeNames(names []string) []string {
deduped := []string{}
seen := make(map[string]bool) seen := make(map[string]bool)
deduped := make([]string, 0, len(names))
for _, name := range names { for _, name := range names {
if _, wasSeen := seen[name]; !wasSeen { if _, wasSeen := seen[name]; !wasSeen {
seen[name] = true seen[name] = true
deduped = append(deduped, name) deduped = append(deduped, name)
} }
} }
return deduped
}
func (s *store) SetNames(id string, names []string) error {
deduped := dedupeNames(names)
rlstore, err := s.LayerStore() rlstore, err := s.LayerStore()
if err != nil { if err != nil {
@ -2243,6 +2256,12 @@ type OptionsConfig struct {
// Image stores. Usually used to access Networked File System // Image stores. Usually used to access Networked File System
// for shared image content // for shared image content
AdditionalImageStores []string `toml:"additionalimagestores"` AdditionalImageStores []string `toml:"additionalimagestores"`
// Size
Size string `toml:"size"`
// OverrideKernelCheck
OverrideKernelCheck string `toml:"override_kernel_check"`
} }
// TOML-friendly explicit tables used for conversions. // TOML-friendly explicit tables used for conversions.
@ -2286,7 +2305,12 @@ func init() {
for _, s := range config.Storage.Options.AdditionalImageStores { for _, s := range config.Storage.Options.AdditionalImageStores {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
} }
if config.Storage.Options.Size != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
}
if config.Storage.Options.OverrideKernelCheck != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck))
}
if os.Getenv("STORAGE_DRIVER") != "" { if os.Getenv("STORAGE_DRIVER") != "" {
DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
} }
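For illustration, with the driver set to "overlay" and hypothetical storage.conf values size = "20G" and override_kernel_check = "1", the code above appends graph driver options of the form shown in this sketch:

// Hypothetical rendering of the new storage.conf options into graph driver option strings.
func exampleGraphDriverOptions() []string {
	driver, size, check := "overlay", "20G", "1"
	return []string{
		fmt.Sprintf("%s.size=%s", driver, size),                   // "overlay.size=20G"
		fmt.Sprintf("%s.override_kernel_check=%s", driver, check), // "overlay.override_kernel_check=1"
	}
}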