Vendor in latest containers/storage
Fixes store.lock issue coming up when using store.Names Signed-off-by: umohnani8 <umohnani@redhat.com>
This commit is contained in:
parent
400713a58b
commit
cfc2393d58
119 changed files with 2846 additions and 1730 deletions
|
@ -23,6 +23,7 @@ RUN apt-get update && apt-get install -y \
|
||||||
libseccomp2/jessie-backports \
|
libseccomp2/jessie-backports \
|
||||||
libseccomp-dev/jessie-backports \
|
libseccomp-dev/jessie-backports \
|
||||||
libtool \
|
libtool \
|
||||||
|
libudev-dev \
|
||||||
protobuf-c-compiler \
|
protobuf-c-compiler \
|
||||||
protobuf-compiler \
|
protobuf-compiler \
|
||||||
python-minimal \
|
python-minimal \
|
||||||
|
|
|
@ -8,7 +8,7 @@ github.com/sirupsen/logrus v1.0.0
|
||||||
github.com/containers/image d17474f39dae1da15ab9ae033d57ebefcf62f77a
|
github.com/containers/image d17474f39dae1da15ab9ae033d57ebefcf62f77a
|
||||||
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
||||||
github.com/ostreedev/ostree-go master
|
github.com/ostreedev/ostree-go master
|
||||||
github.com/containers/storage f8cff0727cf0802f0752ca58d2c05ec5270a47d5
|
github.com/containers/storage 9c85fa701316a49afdf85d55a0d7cb582ed03625
|
||||||
github.com/containernetworking/cni v0.4.0
|
github.com/containernetworking/cni v0.4.0
|
||||||
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
|
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
|
||||||
github.com/opencontainers/selinux v1.0.0-rc1
|
github.com/opencontainers/selinux v1.0.0-rc1
|
||||||
|
|
6
vendor/github.com/containers/storage/containers.go
generated
vendored
6
vendor/github.com/containers/storage/containers.go
generated
vendored
|
@ -2,7 +2,6 @@ package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
@ -13,11 +12,6 @@ import (
|
||||||
"github.com/containers/storage/pkg/truncindex"
|
"github.com/containers/storage/pkg/truncindex"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrContainerUnknown indicates that there was no container with the specified name or ID
|
|
||||||
ErrContainerUnknown = errors.New("container not known")
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Container is a reference to a read-write layer with metadata.
|
// A Container is a reference to a read-write layer with metadata.
|
||||||
type Container struct {
|
type Container struct {
|
||||||
// ID is either one which was specified at create-time, or a random
|
// ID is either one which was specified at create-time, or a random
|
||||||
|
|
7
vendor/github.com/containers/storage/drivers/aufs/aufs.go
generated
vendored
7
vendor/github.com/containers/storage/drivers/aufs/aufs.go
generated
vendored
|
@ -25,6 +25,7 @@ package aufs
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
@ -363,7 +364,7 @@ func (a *Driver) Put(id string) error {
|
||||||
|
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
|
func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
||||||
// AUFS doesn't need the parent layer to produce a diff.
|
// AUFS doesn't need the parent layer to produce a diff.
|
||||||
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
||||||
Compression: archive.Uncompressed,
|
Compression: archive.Uncompressed,
|
||||||
|
@ -394,7 +395,7 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
|
||||||
return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
|
return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Driver) applyDiff(id string, diff archive.Reader) error {
|
func (a *Driver) applyDiff(id string, diff io.Reader) error {
|
||||||
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
||||||
UIDMaps: a.uidMaps,
|
UIDMaps: a.uidMaps,
|
||||||
GIDMaps: a.gidMaps,
|
GIDMaps: a.gidMaps,
|
||||||
|
@ -412,7 +413,7 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
// ApplyDiff extracts the changeset from the given diff into the
|
||||||
// layer with the specified id and parent, returning the size of the
|
// layer with the specified id and parent, returning the size of the
|
||||||
// new layer in bytes.
|
// new layer in bytes.
|
||||||
func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
|
func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
|
||||||
// AUFS doesn't need the parent id to apply the diff.
|
// AUFS doesn't need the parent id to apply the diff.
|
||||||
if err = a.applyDiff(id, diff); err != nil {
|
if err = a.applyDiff(id, diff); err != nil {
|
||||||
return
|
return
|
||||||
|
|
7
vendor/github.com/containers/storage/drivers/driver.go
generated
vendored
7
vendor/github.com/containers/storage/drivers/driver.go
generated
vendored
|
@ -2,6 +2,7 @@ package graphdriver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -83,15 +84,15 @@ type Driver interface {
|
||||||
ProtoDriver
|
ProtoDriver
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
Diff(id, parent string) (archive.Archive, error)
|
Diff(id, parent string) (io.ReadCloser, error)
|
||||||
// Changes produces a list of changes between the specified layer
|
// Changes produces a list of changes between the specified layer
|
||||||
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
||||||
Changes(id, parent string) ([]archive.Change, error)
|
Changes(id, parent string) ([]archive.Change, error)
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
// ApplyDiff extracts the changeset from the given diff into the
|
||||||
// layer with the specified id and parent, returning the size of the
|
// layer with the specified id and parent, returning the size of the
|
||||||
// new layer in bytes.
|
// new layer in bytes.
|
||||||
// The archive.Reader must be an uncompressed stream.
|
// The io.Reader must be an uncompressed stream.
|
||||||
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
|
ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
|
||||||
// DiffSize calculates the changes between the specified id
|
// DiffSize calculates the changes between the specified id
|
||||||
// and its parent and returns the size in bytes of the changes
|
// and its parent and returns the size in bytes of the changes
|
||||||
// relative to its base filesystem directory.
|
// relative to its base filesystem directory.
|
||||||
|
|
12
vendor/github.com/containers/storage/drivers/fsdiff.go
generated
vendored
12
vendor/github.com/containers/storage/drivers/fsdiff.go
generated
vendored
|
@ -1,14 +1,14 @@
|
||||||
package graphdriver
|
package graphdriver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"io"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/archive"
|
"github.com/containers/storage/pkg/archive"
|
||||||
"github.com/containers/storage/pkg/chrootarchive"
|
"github.com/containers/storage/pkg/chrootarchive"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/pkg/ioutils"
|
"github.com/containers/storage/pkg/ioutils"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -31,9 +31,9 @@ type NaiveDiffDriver struct {
|
||||||
// NewNaiveDiffDriver returns a fully functional driver that wraps the
|
// NewNaiveDiffDriver returns a fully functional driver that wraps the
|
||||||
// given ProtoDriver and adds the capability of the following methods which
|
// given ProtoDriver and adds the capability of the following methods which
|
||||||
// it may or may not support on its own:
|
// it may or may not support on its own:
|
||||||
// Diff(id, parent string) (archive.Archive, error)
|
// Diff(id, parent string) (io.ReadCloser, error)
|
||||||
// Changes(id, parent string) ([]archive.Change, error)
|
// Changes(id, parent string) ([]archive.Change, error)
|
||||||
// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
|
// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
|
||||||
// DiffSize(id, parent string) (size int64, err error)
|
// DiffSize(id, parent string) (size int64, err error)
|
||||||
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
|
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
|
||||||
gdw := &NaiveDiffDriver{
|
gdw := &NaiveDiffDriver{
|
||||||
|
@ -46,7 +46,7 @@ func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Dr
|
||||||
|
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
|
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
|
||||||
layerFs, err := gdw.Get(id, "")
|
layerFs, err := gdw.Get(id, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -118,7 +118,7 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
// ApplyDiff extracts the changeset from the given diff into the
|
||||||
// layer with the specified id and parent, returning the size of the
|
// layer with the specified id and parent, returning the size of the
|
||||||
// new layer in bytes.
|
// new layer in bytes.
|
||||||
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
|
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
|
||||||
// Mount the root filesystem so we can apply the diff/layer.
|
// Mount the root filesystem so we can apply the diff/layer.
|
||||||
layerFs, err := gdw.Get(id, "")
|
layerFs, err := gdw.Get(id, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
21
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
21
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
|
@ -5,6 +5,7 @@ package overlay
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
@ -96,7 +97,7 @@ func init() {
|
||||||
// InitWithName returns the a naive diff driver for the overlay filesystem,
|
// InitWithName returns the a naive diff driver for the overlay filesystem,
|
||||||
// which returns the passed-in name when asked which driver it is.
|
// which returns the passed-in name when asked which driver it is.
|
||||||
func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
|
func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
|
||||||
opts, err := parseOptions(options)
|
opts, err := parseOptions(name, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -176,7 +177,7 @@ type overlayOptions struct {
|
||||||
imageStores []string
|
imageStores []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOptions(options []string) (*overlayOptions, error) {
|
func parseOptions(name string, options []string) (*overlayOptions, error) {
|
||||||
o := &overlayOptions{}
|
o := &overlayOptions{}
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
key, val, err := parsers.ParseKeyValueOpt(option)
|
key, val, err := parsers.ParseKeyValueOpt(option)
|
||||||
|
@ -190,24 +191,24 @@ func parseOptions(options []string) (*overlayOptions, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
case "overlay.imagestore":
|
case "overlay.imagestore", "overlay2.imagestore":
|
||||||
// Additional read only image stores to use for lower paths
|
// Additional read only image stores to use for lower paths
|
||||||
for _, store := range strings.Split(val, ",") {
|
for _, store := range strings.Split(val, ",") {
|
||||||
store = filepath.Clean(store)
|
store = filepath.Clean(store)
|
||||||
if !filepath.IsAbs(store) {
|
if !filepath.IsAbs(store) {
|
||||||
return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store)
|
return nil, fmt.Errorf("%s: image path %q is not absolute. Can not be relative", name, store)
|
||||||
}
|
}
|
||||||
st, err := os.Stat(store)
|
st, err := os.Stat(store)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err)
|
return nil, fmt.Errorf("%s: Can't stat imageStore dir %s: %v", name, store, err)
|
||||||
}
|
}
|
||||||
if !st.IsDir() {
|
if !st.IsDir() {
|
||||||
return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
|
return nil, fmt.Errorf("%s: image path %q must be a directory", name, store)
|
||||||
}
|
}
|
||||||
o.imageStores = append(o.imageStores, store)
|
o.imageStores = append(o.imageStores, store)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("overlay: Unknown option %s", key)
|
return nil, fmt.Errorf("%s: Unknown option %s", name, key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return o, nil
|
return o, nil
|
||||||
|
@ -516,7 +517,7 @@ func (d *Driver) Put(id string) error {
|
||||||
// We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway
|
// We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
|
logrus.Debugf("Failed to unmount %s %s: %v", id, d.name, err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -528,7 +529,7 @@ func (d *Driver) Exists(id string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ApplyDiff applies the new layer into a root
|
// ApplyDiff applies the new layer into a root
|
||||||
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
|
func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
|
||||||
applyDir := d.getDiffPath(id)
|
applyDir := d.getDiffPath(id)
|
||||||
|
|
||||||
logrus.Debugf("Applying tar in %s", applyDir)
|
logrus.Debugf("Applying tar in %s", applyDir)
|
||||||
|
@ -559,7 +560,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
|
||||||
|
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
|
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
||||||
diffPath := d.getDiffPath(id)
|
diffPath := d.getDiffPath(id)
|
||||||
logrus.Debugf("Tar with options on %s", diffPath)
|
logrus.Debugf("Tar with options on %s", diffPath)
|
||||||
return archive.TarWithOptions(diffPath, &archive.TarOptions{
|
return archive.TarWithOptions(diffPath, &archive.TarOptions{
|
||||||
|
|
7
vendor/github.com/containers/storage/drivers/proxy.go
generated
vendored
7
vendor/github.com/containers/storage/drivers/proxy.go
generated
vendored
|
@ -4,6 +4,7 @@ package graphdriver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/archive"
|
"github.com/containers/storage/pkg/archive"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -170,7 +171,7 @@ func (d *graphDriverProxy) Cleanup() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
|
func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
|
||||||
args := &graphDriverRequest{
|
args := &graphDriverRequest{
|
||||||
ID: id,
|
ID: id,
|
||||||
Parent: parent,
|
Parent: parent,
|
||||||
|
@ -179,7 +180,7 @@ func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return archive.Archive(body), nil
|
return io.ReadCloser(body), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
|
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
|
||||||
|
@ -198,7 +199,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error)
|
||||||
return ret.Changes, nil
|
return ret.Changes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
|
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
|
||||||
var ret graphDriverResponse
|
var ret graphDriverResponse
|
||||||
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
|
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
|
||||||
return -1, err
|
return -1, err
|
||||||
|
|
2
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
2
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
|
@ -14,7 +14,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// CopyWithTar defines the copy method to use.
|
// CopyWithTar defines the copy method to use.
|
||||||
CopyWithTar = chrootarchive.CopyWithTar
|
CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|
4
vendor/github.com/containers/storage/drivers/windows/windows.go
generated
vendored
4
vendor/github.com/containers/storage/drivers/windows/windows.go
generated
vendored
|
@ -300,7 +300,7 @@ func (d *Driver) Cleanup() error {
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
// The layer should be mounted when calling this function
|
// The layer should be mounted when calling this function
|
||||||
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
|
func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
|
||||||
rID, err := d.resolveID(id)
|
rID, err := d.resolveID(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -483,7 +483,7 @@ func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// exportLayer generates an archive from a layer based on the given ID.
|
// exportLayer generates an archive from a layer based on the given ID.
|
||||||
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) {
|
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
|
||||||
archive, w := io.Pipe()
|
archive, w := io.Pipe()
|
||||||
go func() {
|
go func() {
|
||||||
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
|
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
|
||||||
|
|
52
vendor/github.com/containers/storage/errors.go
generated
vendored
Normal file
52
vendor/github.com/containers/storage/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrContainerUnknown indicates that there was no container with the specified name or ID.
|
||||||
|
ErrContainerUnknown = errors.New("container not known")
|
||||||
|
// ErrImageUnknown indicates that there was no image with the specified name or ID.
|
||||||
|
ErrImageUnknown = errors.New("image not known")
|
||||||
|
// ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer.
|
||||||
|
ErrParentUnknown = errors.New("parent of layer not known")
|
||||||
|
// ErrLayerUnknown indicates that there was no layer with the specified name or ID.
|
||||||
|
ErrLayerUnknown = errors.New("layer not known")
|
||||||
|
// ErrLoadError indicates that there was an initialization error.
|
||||||
|
ErrLoadError = errors.New("error loading storage metadata")
|
||||||
|
// ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used.
|
||||||
|
ErrDuplicateID = errors.New("that ID is already in use")
|
||||||
|
// ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used.
|
||||||
|
ErrDuplicateName = errors.New("that name is already in use")
|
||||||
|
// ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer.
|
||||||
|
ErrParentIsContainer = errors.New("would-be parent layer is a container")
|
||||||
|
// ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container.
|
||||||
|
ErrNotAContainer = errors.New("identifier is not a container")
|
||||||
|
// ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image.
|
||||||
|
ErrNotAnImage = errors.New("identifier is not an image")
|
||||||
|
// ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer.
|
||||||
|
ErrNotALayer = errors.New("identifier is not a layer")
|
||||||
|
// ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist.
|
||||||
|
ErrNotAnID = errors.New("identifier is not a layer, image, or container")
|
||||||
|
// ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children.
|
||||||
|
ErrLayerHasChildren = errors.New("layer has children")
|
||||||
|
// ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer.
|
||||||
|
ErrLayerUsedByImage = errors.New("layer is in use by an image")
|
||||||
|
// ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer.
|
||||||
|
ErrLayerUsedByContainer = errors.New("layer is in use by a container")
|
||||||
|
// ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image.
|
||||||
|
ErrImageUsedByContainer = errors.New("image is in use by a container")
|
||||||
|
// ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information.
|
||||||
|
ErrIncompleteOptions = errors.New("missing necessary StoreOptions")
|
||||||
|
// ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer.
|
||||||
|
ErrSizeUnknown = errors.New("size is not known")
|
||||||
|
// ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
|
||||||
|
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
|
||||||
|
// ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write.
|
||||||
|
ErrLockReadOnly = errors.New("lock is not a read-write lock")
|
||||||
|
// ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images.
|
||||||
|
ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
|
||||||
|
// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
|
||||||
|
ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
|
||||||
|
)
|
7
vendor/github.com/containers/storage/images.go
generated
vendored
7
vendor/github.com/containers/storage/images.go
generated
vendored
|
@ -13,11 +13,6 @@ import (
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrImageUnknown indicates that there was no image with the specified name or ID
|
|
||||||
ErrImageUnknown = errors.New("image not known")
|
|
||||||
)
|
|
||||||
|
|
||||||
// An Image is a reference to a layer and an associated metadata string.
|
// An Image is a reference to a layer and an associated metadata string.
|
||||||
type Image struct {
|
type Image struct {
|
||||||
// ID is either one which was specified at create-time, or a random
|
// ID is either one which was specified at create-time, or a random
|
||||||
|
@ -153,7 +148,7 @@ func (r *imageStore) Load() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if shouldSave && !r.IsReadWrite() {
|
if shouldSave && !r.IsReadWrite() {
|
||||||
return errors.New("image store assigns the same name to multiple images")
|
return ErrDuplicateImageNames
|
||||||
}
|
}
|
||||||
r.images = images
|
r.images = images
|
||||||
r.idindex = truncindex.NewTruncIndex(idlist)
|
r.idindex = truncindex.NewTruncIndex(idlist)
|
||||||
|
|
17
vendor/github.com/containers/storage/layers.go
generated
vendored
17
vendor/github.com/containers/storage/layers.go
generated
vendored
|
@ -27,13 +27,6 @@ const (
|
||||||
compressionFlag = "diff-compression"
|
compressionFlag = "diff-compression"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer
|
|
||||||
ErrParentUnknown = errors.New("parent of layer not known")
|
|
||||||
// ErrLayerUnknown indicates that there was no layer with the specified name or ID
|
|
||||||
ErrLayerUnknown = errors.New("layer not known")
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Layer is a record of a copy-on-write layer that's stored by the lower
|
// A Layer is a record of a copy-on-write layer that's stored by the lower
|
||||||
// level graph driver.
|
// level graph driver.
|
||||||
type Layer struct {
|
type Layer struct {
|
||||||
|
@ -191,7 +184,7 @@ type LayerStore interface {
|
||||||
CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
|
CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
|
||||||
|
|
||||||
// Put combines the functions of CreateWithFlags and ApplyDiff.
|
// Put combines the functions of CreateWithFlags and ApplyDiff.
|
||||||
Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
|
Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
|
||||||
|
|
||||||
// SetNames replaces the list of names associated with a layer with the
|
// SetNames replaces the list of names associated with a layer with the
|
||||||
// supplied values.
|
// supplied values.
|
||||||
|
@ -213,7 +206,7 @@ type LayerStore interface {
|
||||||
|
|
||||||
// ApplyDiff reads a tarstream which was created by a previous call to Diff and
|
// ApplyDiff reads a tarstream which was created by a previous call to Diff and
|
||||||
// applies its changes to a specified layer.
|
// applies its changes to a specified layer.
|
||||||
ApplyDiff(to string, diff archive.Reader) (int64, error)
|
ApplyDiff(to string, diff io.Reader) (int64, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type layerStore struct {
|
type layerStore struct {
|
||||||
|
@ -280,7 +273,7 @@ func (r *layerStore) Load() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if shouldSave && !r.IsReadWrite() {
|
if shouldSave && !r.IsReadWrite() {
|
||||||
return errors.New("layer store assigns the same name to multiple layers")
|
return ErrDuplicateLayerNames
|
||||||
}
|
}
|
||||||
mpath := r.mountspath()
|
mpath := r.mountspath()
|
||||||
data, err = ioutil.ReadFile(mpath)
|
data, err = ioutil.ReadFile(mpath)
|
||||||
|
@ -470,7 +463,7 @@ func (r *layerStore) Status() ([][2]string, error) {
|
||||||
return r.driver.Status(), nil
|
return r.driver.Status(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) {
|
func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
|
||||||
if !r.IsReadWrite() {
|
if !r.IsReadWrite() {
|
||||||
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
|
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
|
||||||
}
|
}
|
||||||
|
@ -907,7 +900,7 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
|
||||||
return r.driver.DiffSize(to, from)
|
return r.driver.DiffSize(to, from)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) {
|
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
|
||||||
if !r.IsReadWrite() {
|
if !r.IsReadWrite() {
|
||||||
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
|
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
|
||||||
}
|
}
|
||||||
|
|
2
vendor/github.com/containers/storage/lockfile.go
generated
vendored
2
vendor/github.com/containers/storage/lockfile.go
generated
vendored
|
@ -44,8 +44,6 @@ type lockfile struct {
|
||||||
var (
|
var (
|
||||||
lockfiles map[string]*lockfile
|
lockfiles map[string]*lockfile
|
||||||
lockfilesLock sync.Mutex
|
lockfilesLock sync.Mutex
|
||||||
// ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write
|
|
||||||
ErrLockReadOnly = errors.New("lock is not a read-write lock")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetLockfile opens a read-write lock file, creating it if necessary. The
|
// GetLockfile opens a read-write lock file, creating it if necessary. The
|
||||||
|
|
1
vendor/github.com/containers/storage/pkg/archive/README.md
generated
vendored
Normal file
1
vendor/github.com/containers/storage/pkg/archive/README.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
This code provides helper functions for dealing with archive files.
|
486
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
486
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
|
@ -6,7 +6,6 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/bzip2"
|
"compress/bzip2"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -27,18 +26,11 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
// Archive is a type of io.ReadCloser which has two interfaces Read and Closer.
|
|
||||||
Archive io.ReadCloser
|
|
||||||
// Reader is a type of io.Reader.
|
|
||||||
Reader io.Reader
|
|
||||||
// Compression is the state represents if compressed or not.
|
// Compression is the state represents if compressed or not.
|
||||||
Compression int
|
Compression int
|
||||||
// WhiteoutFormat is the format of whiteouts unpacked
|
// WhiteoutFormat is the format of whiteouts unpacked
|
||||||
WhiteoutFormat int
|
WhiteoutFormat int
|
||||||
// TarChownOptions wraps the chown options UID and GID.
|
|
||||||
TarChownOptions struct {
|
|
||||||
UID, GID int
|
|
||||||
}
|
|
||||||
// TarOptions wraps the tar options.
|
// TarOptions wraps the tar options.
|
||||||
TarOptions struct {
|
TarOptions struct {
|
||||||
IncludeFiles []string
|
IncludeFiles []string
|
||||||
|
@ -47,7 +39,7 @@ type (
|
||||||
NoLchown bool
|
NoLchown bool
|
||||||
UIDMaps []idtools.IDMap
|
UIDMaps []idtools.IDMap
|
||||||
GIDMaps []idtools.IDMap
|
GIDMaps []idtools.IDMap
|
||||||
ChownOpts *TarChownOptions
|
ChownOpts *idtools.IDPair
|
||||||
IncludeSourceDir bool
|
IncludeSourceDir bool
|
||||||
// WhiteoutFormat is the expected on disk format for whiteout files.
|
// WhiteoutFormat is the expected on disk format for whiteout files.
|
||||||
// This format will be converted to the standard format on pack
|
// This format will be converted to the standard format on pack
|
||||||
|
@ -59,34 +51,28 @@ type (
|
||||||
// For each include when creating an archive, the included name will be
|
// For each include when creating an archive, the included name will be
|
||||||
// replaced with the matching name from this map.
|
// replaced with the matching name from this map.
|
||||||
RebaseNames map[string]string
|
RebaseNames map[string]string
|
||||||
|
InUserNS bool
|
||||||
}
|
}
|
||||||
|
)
|
||||||
|
|
||||||
// Archiver allows the reuse of most utility functions of this package
|
// Archiver allows the reuse of most utility functions of this package
|
||||||
// with a pluggable Untar function. Also, to facilitate the passing of
|
// with a pluggable Untar function. Also, to facilitate the passing of
|
||||||
// specific id mappings for untar, an archiver can be created with maps
|
// specific id mappings for untar, an archiver can be created with maps
|
||||||
// which will then be passed to Untar operations
|
// which will then be passed to Untar operations
|
||||||
Archiver struct {
|
type Archiver struct {
|
||||||
Untar func(io.Reader, string, *TarOptions) error
|
Untar func(io.Reader, string, *TarOptions) error
|
||||||
UIDMaps []idtools.IDMap
|
IDMappings *idtools.IDMappings
|
||||||
GIDMaps []idtools.IDMap
|
}
|
||||||
|
|
||||||
|
// NewDefaultArchiver returns a new Archiver without any IDMappings
|
||||||
|
func NewDefaultArchiver() *Archiver {
|
||||||
|
return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
|
||||||
}
|
}
|
||||||
|
|
||||||
// breakoutError is used to differentiate errors related to breaking out
|
// breakoutError is used to differentiate errors related to breaking out
|
||||||
// When testing archive breakout in the unit tests, this error is expected
|
// When testing archive breakout in the unit tests, this error is expected
|
||||||
// in order for the test to pass.
|
// in order for the test to pass.
|
||||||
breakoutError error
|
type breakoutError error
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNotImplemented is the error message of function not implemented.
|
|
||||||
ErrNotImplemented = errors.New("Function not implemented")
|
|
||||||
defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// HeaderSize is the size in bytes of a tar header
|
|
||||||
HeaderSize = 512
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Uncompressed represents the uncompressed.
|
// Uncompressed represents the uncompressed.
|
||||||
|
@ -107,17 +93,15 @@ const (
|
||||||
OverlayWhiteoutFormat
|
OverlayWhiteoutFormat
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsArchive checks for the magic bytes of a tar or any supported compression
|
const (
|
||||||
// algorithm.
|
modeISDIR = 040000 // Directory
|
||||||
func IsArchive(header []byte) bool {
|
modeISFIFO = 010000 // FIFO
|
||||||
compression := DetectCompression(header)
|
modeISREG = 0100000 // Regular file
|
||||||
if compression != Uncompressed {
|
modeISLNK = 0120000 // Symbolic link
|
||||||
return true
|
modeISBLK = 060000 // Block special file
|
||||||
}
|
modeISCHR = 020000 // Character special file
|
||||||
r := tar.NewReader(bytes.NewBuffer(header))
|
modeISSOCK = 0140000 // Socket
|
||||||
_, err := r.Next()
|
)
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsArchivePath checks if the (possibly compressed) file at the given path
|
// IsArchivePath checks if the (possibly compressed) file at the given path
|
||||||
// starts with a tar file header.
|
// starts with a tar file header.
|
||||||
|
@ -147,7 +131,7 @@ func DetectCompression(source []byte) Compression {
|
||||||
logrus.Debug("Len too short")
|
logrus.Debug("Len too short")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if bytes.Compare(m, source[:len(m)]) == 0 {
|
if bytes.Equal(m, source[:len(m)]) {
|
||||||
return compression
|
return compression
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -206,7 +190,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompressStream compresseses the dest with specified compression algorithm.
|
// CompressStream compresses the dest with specified compression algorithm.
|
||||||
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
|
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
|
||||||
p := pools.BufioWriter32KPool
|
p := pools.BufioWriter32KPool
|
||||||
buf := p.Get(dest)
|
buf := p.Get(dest)
|
||||||
|
@ -220,13 +204,100 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
|
||||||
return writeBufWrapper, nil
|
return writeBufWrapper, nil
|
||||||
case Bzip2, Xz:
|
case Bzip2, Xz:
|
||||||
// archive/bzip2 does not support writing, and there is no xz support at all
|
// archive/bzip2 does not support writing, and there is no xz support at all
|
||||||
// However, this is not a problem as we only currently generates gzipped tars
|
// However, this is not a problem as docker only currently generates gzipped tars
|
||||||
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
|
||||||
|
// modify the contents or header of an entry in the archive. If the file already
|
||||||
|
// exists in the archive the TarModifierFunc will be called with the Header and
|
||||||
|
// a reader which will return the files content. If the file does not exist both
|
||||||
|
// header and content will be nil.
|
||||||
|
type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
|
||||||
|
|
||||||
|
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
|
||||||
|
// tar stream are modified if they match any of the keys in mods.
|
||||||
|
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
|
||||||
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
tarReader := tar.NewReader(inputTarStream)
|
||||||
|
tarWriter := tar.NewWriter(pipeWriter)
|
||||||
|
defer inputTarStream.Close()
|
||||||
|
defer tarWriter.Close()
|
||||||
|
|
||||||
|
modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
|
||||||
|
header, data, err := modifier(name, original, tarReader)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case header == nil:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
header.Name = name
|
||||||
|
header.Size = int64(len(data))
|
||||||
|
if err := tarWriter.WriteHeader(header); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(data) != 0 {
|
||||||
|
if _, err := tarWriter.Write(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var originalHeader *tar.Header
|
||||||
|
for {
|
||||||
|
originalHeader, err = tarReader.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
modifier, ok := mods[originalHeader.Name]
|
||||||
|
if !ok {
|
||||||
|
// No modifiers for this file, copy the header and data
|
||||||
|
if err := tarWriter.WriteHeader(originalHeader); err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err := pools.Copy(tarWriter, tarReader); err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delete(mods, originalHeader.Name)
|
||||||
|
|
||||||
|
if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply the modifiers that haven't matched any files in the archive
|
||||||
|
for name, modifier := range mods {
|
||||||
|
if err := modify(name, nil, modifier, nil); err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pipeWriter.Close()
|
||||||
|
|
||||||
|
}()
|
||||||
|
return pipeReader
|
||||||
|
}
|
||||||
|
|
||||||
// Extension returns the extension of a file that uses the specified compression algorithm.
|
// Extension returns the extension of a file that uses the specified compression algorithm.
|
||||||
func (compression *Compression) Extension() string {
|
func (compression *Compression) Extension() string {
|
||||||
switch *compression {
|
switch *compression {
|
||||||
|
@ -242,8 +313,65 @@ func (compression *Compression) Extension() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FileInfoHeader creates a populated Header from fi.
|
||||||
|
// Compared to archive pkg this function fills in more information.
|
||||||
|
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
|
||||||
|
// which have been deleted since Go 1.9 archive/tar.
|
||||||
|
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
|
||||||
|
hdr, err := tar.FileInfoHeader(fi, link)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
|
||||||
|
name, err = canonicalTarName(name, fi.IsDir())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
|
||||||
|
}
|
||||||
|
hdr.Name = name
|
||||||
|
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return hdr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
|
||||||
|
// https://github.com/golang/go/commit/66b5a2f
|
||||||
|
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
|
||||||
|
fm := fi.Mode()
|
||||||
|
switch {
|
||||||
|
case fm.IsRegular():
|
||||||
|
mode |= modeISREG
|
||||||
|
case fi.IsDir():
|
||||||
|
mode |= modeISDIR
|
||||||
|
case fm&os.ModeSymlink != 0:
|
||||||
|
mode |= modeISLNK
|
||||||
|
case fm&os.ModeDevice != 0:
|
||||||
|
if fm&os.ModeCharDevice != 0 {
|
||||||
|
mode |= modeISCHR
|
||||||
|
} else {
|
||||||
|
mode |= modeISBLK
|
||||||
|
}
|
||||||
|
case fm&os.ModeNamedPipe != 0:
|
||||||
|
mode |= modeISFIFO
|
||||||
|
case fm&os.ModeSocket != 0:
|
||||||
|
mode |= modeISSOCK
|
||||||
|
}
|
||||||
|
return mode
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
|
||||||
|
// to a tar header
|
||||||
|
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
|
||||||
|
capability, _ := system.Lgetxattr(path, "security.capability")
|
||||||
|
if capability != nil {
|
||||||
|
hdr.Xattrs = make(map[string]string)
|
||||||
|
hdr.Xattrs["security.capability"] = string(capability)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type tarWhiteoutConverter interface {
|
type tarWhiteoutConverter interface {
|
||||||
ConvertWrite(*tar.Header, string, os.FileInfo) error
|
ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
|
||||||
ConvertRead(*tar.Header, string) (bool, error)
|
ConvertRead(*tar.Header, string) (bool, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -253,8 +381,8 @@ type tarAppender struct {
|
||||||
|
|
||||||
// for hardlink mapping
|
// for hardlink mapping
|
||||||
SeenFiles map[uint64]string
|
SeenFiles map[uint64]string
|
||||||
UIDMaps []idtools.IDMap
|
IDMappings *idtools.IDMappings
|
||||||
GIDMaps []idtools.IDMap
|
ChownOpts *idtools.IDPair
|
||||||
|
|
||||||
// For packing and unpacking whiteout files in the
|
// For packing and unpacking whiteout files in the
|
||||||
// non standard format. The whiteout files defined
|
// non standard format. The whiteout files defined
|
||||||
|
@ -263,6 +391,16 @@ type tarAppender struct {
|
||||||
WhiteoutConverter tarWhiteoutConverter
|
WhiteoutConverter tarWhiteoutConverter
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
|
||||||
|
return &tarAppender{
|
||||||
|
SeenFiles: make(map[uint64]string),
|
||||||
|
TarWriter: tar.NewWriter(writer),
|
||||||
|
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||||
|
IDMappings: idMapping,
|
||||||
|
ChownOpts: chownOpts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// canonicalTarName provides a platform-independent and consistent posix-style
|
// canonicalTarName provides a platform-independent and consistent posix-style
|
||||||
//path for files and directories to be archived regardless of the platform.
|
//path for files and directories to be archived regardless of the platform.
|
||||||
func canonicalTarName(name string, isDir bool) (string, error) {
|
func canonicalTarName(name string, isDir bool) (string, error) {
|
||||||
|
@ -285,33 +423,30 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
link := ""
|
var link string
|
||||||
if fi.Mode()&os.ModeSymlink != 0 {
|
if fi.Mode()&os.ModeSymlink != 0 {
|
||||||
if link, err = os.Readlink(path); err != nil {
|
var err error
|
||||||
return err
|
link, err = os.Readlink(path)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr, err := tar.FileInfoHeader(fi, link)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
|
||||||
|
|
||||||
name, err = canonicalTarName(name, fi.IsDir())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("tar: cannot canonicalize path: %v", err)
|
|
||||||
}
|
}
|
||||||
hdr.Name = name
|
|
||||||
|
|
||||||
inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
|
hdr, err := FileInfoHeader(name, fi, link)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// if it's not a directory and has more than 1 link,
|
// if it's not a directory and has more than 1 link,
|
||||||
// it's hard linked, so set the type flag accordingly
|
// it's hard linked, so set the type flag accordingly
|
||||||
if !fi.IsDir() && hasHardlinks(fi) {
|
if !fi.IsDir() && hasHardlinks(fi) {
|
||||||
|
inode, err := getInodeFromStat(fi.Sys())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// a link should have a name that it links too
|
// a link should have a name that it links too
|
||||||
// and that linked name should be first in the tar archive
|
// and that linked name should be first in the tar archive
|
||||||
if oldpath, ok := ta.SeenFiles[inode]; ok {
|
if oldpath, ok := ta.SeenFiles[inode]; ok {
|
||||||
|
@ -323,36 +458,46 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
capability, _ := system.Lgetxattr(path, "security.capability")
|
|
||||||
if capability != nil {
|
|
||||||
hdr.Xattrs = make(map[string]string)
|
|
||||||
hdr.Xattrs["security.capability"] = string(capability)
|
|
||||||
}
|
|
||||||
|
|
||||||
//handle re-mapping container ID mappings back to host ID mappings before
|
//handle re-mapping container ID mappings back to host ID mappings before
|
||||||
//writing tar headers/files. We skip whiteout files because they were written
|
//writing tar headers/files. We skip whiteout files because they were written
|
||||||
//by the kernel and already have proper ownership relative to the host
|
//by the kernel and already have proper ownership relative to the host
|
||||||
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
|
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
|
||||||
uid, gid, err := getFileUIDGID(fi.Sys())
|
fileIDPair, err := getFileUIDGID(fi.Sys())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
|
hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
hdr.Uid = xUID
|
|
||||||
hdr.Gid = xGID
|
// explicitly override with ChownOpts
|
||||||
|
if ta.ChownOpts != nil {
|
||||||
|
hdr.Uid = ta.ChownOpts.UID
|
||||||
|
hdr.Gid = ta.ChownOpts.GID
|
||||||
}
|
}
|
||||||
|
|
||||||
if ta.WhiteoutConverter != nil {
|
if ta.WhiteoutConverter != nil {
|
||||||
if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
|
wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If a new whiteout file exists, write original hdr, then
|
||||||
|
// replace hdr with wo to be written after. Whiteouts should
|
||||||
|
// always be written after the original. Note the original
|
||||||
|
// hdr may have been updated to be a whiteout with returning
|
||||||
|
// a whiteout header
|
||||||
|
if wo != nil {
|
||||||
|
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||||
|
return fmt.Errorf("tar: cannot use whiteout for non-empty file")
|
||||||
|
}
|
||||||
|
hdr = wo
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
||||||
|
@ -360,7 +505,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||||
file, err := os.Open(path)
|
// We use system.OpenSequential to ensure we use sequential file
|
||||||
|
// access on Windows to avoid depleting the standby list.
|
||||||
|
// On Linux, this equates to a regular os.Open.
|
||||||
|
file, err := system.OpenSequential(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -381,7 +529,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
|
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
|
||||||
// hdr.Mode is in linux format, which we can use for sycalls,
|
// hdr.Mode is in linux format, which we can use for sycalls,
|
||||||
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
||||||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||||
|
@ -398,8 +546,10 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||||
}
|
}
|
||||||
|
|
||||||
case tar.TypeReg, tar.TypeRegA:
|
case tar.TypeReg, tar.TypeRegA:
|
||||||
// Source is regular file
|
// Source is regular file. We use system.OpenFileSequential to use sequential
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
|
// file access to avoid depleting the standby list on Windows.
|
||||||
|
// On Linux, this equates to a regular os.OpenFile
|
||||||
|
file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -409,7 +559,16 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||||
}
|
}
|
||||||
file.Close()
|
file.Close()
|
||||||
|
|
||||||
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
|
case tar.TypeBlock, tar.TypeChar:
|
||||||
|
if inUserns { // cannot create devices in a userns
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Handle this is an OS-specific way
|
||||||
|
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case tar.TypeFifo:
|
||||||
// Handle this is an OS-specific way
|
// Handle this is an OS-specific way
|
||||||
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -444,13 +603,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
|
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lchown is not supported on Windows.
|
// Lchown is not supported on Windows.
|
||||||
if Lchown && runtime.GOOS != "windows" {
|
if Lchown && runtime.GOOS != "windows" {
|
||||||
if chownOpts == nil {
|
if chownOpts == nil {
|
||||||
chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
|
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
|
||||||
}
|
}
|
||||||
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
|
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -525,8 +684,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||||
// on platforms other than Windows.
|
// on platforms other than Windows.
|
||||||
srcPath = fixVolumePathPrefix(srcPath)
|
srcPath = fixVolumePathPrefix(srcPath)
|
||||||
|
|
||||||
patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
|
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -539,14 +697,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
ta := &tarAppender{
|
ta := newTarAppender(
|
||||||
TarWriter: tar.NewWriter(compressWriter),
|
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
|
||||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
compressWriter,
|
||||||
SeenFiles: make(map[uint64]string),
|
options.ChownOpts,
|
||||||
UIDMaps: options.UIDMaps,
|
)
|
||||||
GIDMaps: options.GIDMaps,
|
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
|
||||||
WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
// Make sure to check the error on Close.
|
// Make sure to check the error on Close.
|
||||||
|
@ -623,7 +779,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||||
// is asking for that file no matter what - which is true
|
// is asking for that file no matter what - which is true
|
||||||
// for some files, like .dockerignore and Dockerfile (sometimes)
|
// for some files, like .dockerignore and Dockerfile (sometimes)
|
||||||
if include != relFilePath {
|
if include != relFilePath {
|
||||||
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
|
skip, err = pm.Matches(relFilePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("Error matching %s: %v", relFilePath, err)
|
logrus.Errorf("Error matching %s: %v", relFilePath, err)
|
||||||
return err
|
return err
|
||||||
|
@ -633,7 +789,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||||
if skip {
|
if skip {
|
||||||
// If we want to skip this file and its a directory
|
// If we want to skip this file and its a directory
|
||||||
// then we should first check to see if there's an
|
// then we should first check to see if there's an
|
||||||
// excludes pattern (eg !dir/file) that starts with this
|
// excludes pattern (e.g. !dir/file) that starts with this
|
||||||
// dir. If so then we can't skip this dir.
|
// dir. If so then we can't skip this dir.
|
||||||
|
|
||||||
// Its not a dir then so we can just return/skip.
|
// Its not a dir then so we can just return/skip.
|
||||||
|
@ -642,18 +798,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// No exceptions (!...) in patterns so just skip dir
|
// No exceptions (!...) in patterns so just skip dir
|
||||||
if !exceptions {
|
if !pm.Exclusions() {
|
||||||
return filepath.SkipDir
|
return filepath.SkipDir
|
||||||
}
|
}
|
||||||
|
|
||||||
dirSlash := relFilePath + string(filepath.Separator)
|
dirSlash := relFilePath + string(filepath.Separator)
|
||||||
|
|
||||||
for _, pat := range patterns {
|
for _, pat := range pm.Patterns() {
|
||||||
if pat[0] != '!' {
|
if !pat.Exclusion() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
pat = pat[1:] + string(filepath.Separator)
|
if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
|
||||||
if strings.HasPrefix(pat, dirSlash) {
|
|
||||||
// found a match - so can't skip this dir
|
// found a match - so can't skip this dir
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -703,10 +858,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
|
||||||
defer pools.BufioReader32KPool.Put(trBuf)
|
defer pools.BufioReader32KPool.Put(trBuf)
|
||||||
|
|
||||||
var dirs []*tar.Header
|
var dirs []*tar.Header
|
||||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||||
if err != nil {
|
rootIDs := idMappings.RootPair()
|
||||||
return err
|
|
||||||
}
|
|
||||||
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
|
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
|
||||||
|
|
||||||
// Iterate through the files in the archive.
|
// Iterate through the files in the archive.
|
||||||
|
@ -740,7 +893,7 @@ loop:
|
||||||
parent := filepath.Dir(hdr.Name)
|
parent := filepath.Dir(hdr.Name)
|
||||||
parentPath := filepath.Join(dest, parent)
|
parentPath := filepath.Join(dest, parent)
|
||||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||||
err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
|
err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -785,27 +938,9 @@ loop:
|
||||||
}
|
}
|
||||||
trBuf.Reset(tr)
|
trBuf.Reset(tr)
|
||||||
|
|
||||||
// if the options contain a uid & gid maps, convert header uid/gid
|
if err := remapIDs(idMappings, hdr); err != nil {
|
||||||
// entries using the maps such that lchown sets the proper mapped
|
|
||||||
// uid/gid after writing the file. We only perform this mapping if
|
|
||||||
// the file isn't already owned by the remapped root UID or GID, as
|
|
||||||
// that specific uid/gid has no mapping from container -> host, and
|
|
||||||
// those files already have the proper ownership for inside the
|
|
||||||
// container.
|
|
||||||
if hdr.Uid != remappedRootUID {
|
|
||||||
xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
hdr.Uid = xUID
|
|
||||||
}
|
|
||||||
if hdr.Gid != remappedRootGID {
|
|
||||||
xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Gid = xGID
|
|
||||||
}
|
|
||||||
|
|
||||||
if whiteoutConverter != nil {
|
if whiteoutConverter != nil {
|
||||||
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
|
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
|
||||||
|
@ -817,7 +952,7 @@ loop:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
|
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -889,23 +1024,13 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer archive.Close()
|
defer archive.Close()
|
||||||
|
options := &TarOptions{
|
||||||
var options *TarOptions
|
UIDMaps: archiver.IDMappings.UIDs(),
|
||||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
GIDMaps: archiver.IDMappings.GIDs(),
|
||||||
options = &TarOptions{
|
|
||||||
UIDMaps: archiver.UIDMaps,
|
|
||||||
GIDMaps: archiver.GIDMaps,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return archiver.Untar(archive, dst, options)
|
return archiver.Untar(archive, dst, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
|
||||||
// If either Tar or Untar fails, TarUntar aborts and returns the error.
|
|
||||||
func TarUntar(src, dst string) error {
|
|
||||||
return defaultArchiver.TarUntar(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UntarPath untar a file from path to a destination, src is the source tar file path.
|
// UntarPath untar a file from path to a destination, src is the source tar file path.
|
||||||
func (archiver *Archiver) UntarPath(src, dst string) error {
|
func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||||
archive, err := os.Open(src)
|
archive, err := os.Open(src)
|
||||||
|
@ -913,22 +1038,13 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer archive.Close()
|
defer archive.Close()
|
||||||
var options *TarOptions
|
options := &TarOptions{
|
||||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
UIDMaps: archiver.IDMappings.UIDs(),
|
||||||
options = &TarOptions{
|
GIDMaps: archiver.IDMappings.GIDs(),
|
||||||
UIDMaps: archiver.UIDMaps,
|
|
||||||
GIDMaps: archiver.GIDMaps,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return archiver.Untar(archive, dst, options)
|
return archiver.Untar(archive, dst, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UntarPath is a convenience function which looks for an archive
|
|
||||||
// at filesystem path `src`, and unpacks it at `dst`.
|
|
||||||
func UntarPath(src, dst string) error {
|
|
||||||
return defaultArchiver.UntarPath(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||||
// unpacks it at filesystem path `dst`.
|
// unpacks it at filesystem path `dst`.
|
||||||
// The archive is streamed directly with fixed buffering and no
|
// The archive is streamed directly with fixed buffering and no
|
||||||
|
@ -945,27 +1061,16 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
||||||
// if this archiver is set up with ID mapping we need to create
|
// if this archiver is set up with ID mapping we need to create
|
||||||
// the new destination directory with the remapped root UID/GID pair
|
// the new destination directory with the remapped root UID/GID pair
|
||||||
// as owner
|
// as owner
|
||||||
rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
|
rootIDs := archiver.IDMappings.RootPair()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Create dst, copy src's content into it
|
// Create dst, copy src's content into it
|
||||||
logrus.Debugf("Creating dest directory: %s", dst)
|
logrus.Debugf("Creating dest directory: %s", dst)
|
||||||
if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
|
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
||||||
return archiver.TarUntar(src, dst)
|
return archiver.TarUntar(src, dst)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
|
||||||
// unpacks it at filesystem path `dst`.
|
|
||||||
// The archive is streamed directly with fixed buffering and no
|
|
||||||
// intermediary disk IO.
|
|
||||||
func CopyWithTar(src, dst string) error {
|
|
||||||
return defaultArchiver.CopyWithTar(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||||
// for a single file. It copies a regular file from path `src` to
|
// for a single file. It copies a regular file from path `src` to
|
||||||
// path `dst`, and preserves all its metadata.
|
// path `dst`, and preserves all its metadata.
|
||||||
|
@ -986,7 +1091,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||||
dst = filepath.Join(dst, filepath.Base(src))
|
dst = filepath.Join(dst, filepath.Base(src))
|
||||||
}
|
}
|
||||||
// Create the holding directory if necessary
|
// Create the holding directory if necessary
|
||||||
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
|
if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1007,28 +1112,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||||
hdr.Name = filepath.Base(dst)
|
hdr.Name = filepath.Base(dst)
|
||||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||||
|
|
||||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
|
if err := remapIDs(archiver.IDMappings, hdr); err != nil {
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// only perform mapping if the file being copied isn't already owned by the
|
|
||||||
// uid or gid of the remapped root in the container
|
|
||||||
if remappedRootUID != hdr.Uid {
|
|
||||||
xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Uid = xUID
|
|
||||||
}
|
|
||||||
if remappedRootGID != hdr.Gid {
|
|
||||||
xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Gid = xGID
|
|
||||||
}
|
|
||||||
|
|
||||||
tw := tar.NewWriter(w)
|
tw := tar.NewWriter(w)
|
||||||
defer tw.Close()
|
defer tw.Close()
|
||||||
if err := tw.WriteHeader(hdr); err != nil {
|
if err := tw.WriteHeader(hdr); err != nil {
|
||||||
|
@ -1040,7 +1127,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
defer func() {
|
defer func() {
|
||||||
if er := <-errC; err != nil {
|
if er := <-errC; err == nil && er != nil {
|
||||||
err = er
|
err = er
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
@ -1052,16 +1139,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
|
||||||
// for a single file. It copies a regular file from path `src` to
|
ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
|
||||||
// path `dst`, and preserves all its metadata.
|
hdr.Uid, hdr.Gid = ids.UID, ids.GID
|
||||||
//
|
return err
|
||||||
// Destination handling is in an operating specific manner depending
|
|
||||||
// where the daemon is running. If `dst` ends with a trailing slash
|
|
||||||
// the final destination path will be `dst/base(src)` (Linux) or
|
|
||||||
// `dst\base(src)` (Windows).
|
|
||||||
func CopyFileWithTar(src, dst string) (err error) {
|
|
||||||
return defaultArchiver.CopyFileWithTar(src, dst)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// cmdStream executes a command, and returns its stdout as a stream.
|
// cmdStream executes a command, and returns its stdout as a stream.
|
||||||
|
@ -1096,7 +1177,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{},
|
||||||
// NewTempArchive reads the content of src into a temporary file, and returns the contents
|
// NewTempArchive reads the content of src into a temporary file, and returns the contents
|
||||||
// of that file as an archive. The archive can only be read once - as soon as reading completes,
|
// of that file as an archive. The archive can only be read once - as soon as reading completes,
|
||||||
// the file will be deleted.
|
// the file will be deleted.
|
||||||
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
|
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
|
||||||
f, err := ioutil.TempFile(dir, "")
|
f, err := ioutil.TempFile(dir, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -1145,3 +1226,26 @@ func (archive *TempArchive) Read(data []byte) (int, error) {
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsArchive checks for the magic bytes of a tar or any supported compression
|
||||||
|
// algorithm.
|
||||||
|
func IsArchive(header []byte) bool {
|
||||||
|
compression := DetectCompression(header)
|
||||||
|
if compression != Uncompressed {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
r := tar.NewReader(bytes.NewBuffer(header))
|
||||||
|
_, err := r.Next()
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UntarPath is a convenience function which looks for an archive
|
||||||
|
// at filesystem path `src`, and unpacks it at `dst`.
|
||||||
|
func UntarPath(src, dst string) error {
|
||||||
|
return NewDefaultArchiver().UntarPath(src, dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// HeaderSize is the size in bytes of a tar header
|
||||||
|
HeaderSize = 512
|
||||||
|
)
|
||||||
|
|
25
vendor/github.com/containers/storage/pkg/archive/archive_linux.go
generated
vendored
25
vendor/github.com/containers/storage/pkg/archive/archive_linux.go
generated
vendored
|
@ -5,9 +5,9 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
|
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
|
||||||
|
@ -19,7 +19,7 @@ func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
|
||||||
|
|
||||||
type overlayWhiteoutConverter struct{}
|
type overlayWhiteoutConverter struct{}
|
||||||
|
|
||||||
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
|
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
|
||||||
// convert whiteouts to AUFS format
|
// convert whiteouts to AUFS format
|
||||||
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
|
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
|
||||||
// we just rename the file and make it normal
|
// we just rename the file and make it normal
|
||||||
|
@ -34,12 +34,16 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
|
||||||
// convert opaque dirs to AUFS format by writing an empty file with the prefix
|
// convert opaque dirs to AUFS format by writing an empty file with the prefix
|
||||||
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
|
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
|
if len(opaque) == 1 && opaque[0] == 'y' {
|
||||||
|
if hdr.Xattrs != nil {
|
||||||
|
delete(hdr.Xattrs, "trusted.overlay.opaque")
|
||||||
|
}
|
||||||
|
|
||||||
// create a header for the whiteout file
|
// create a header for the whiteout file
|
||||||
// it should inherit some properties from the parent, but be a regular file
|
// it should inherit some properties from the parent, but be a regular file
|
||||||
*hdr = tar.Header{
|
wo = &tar.Header{
|
||||||
Typeflag: tar.TypeReg,
|
Typeflag: tar.TypeReg,
|
||||||
Mode: hdr.Mode & int64(os.ModePerm),
|
Mode: hdr.Mode & int64(os.ModePerm),
|
||||||
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
|
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
|
||||||
|
@ -54,7 +58,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
|
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
|
||||||
|
@ -63,12 +67,9 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
|
||||||
|
|
||||||
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
|
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
|
||||||
if base == WhiteoutOpaqueDir {
|
if base == WhiteoutOpaqueDir {
|
||||||
if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
|
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't write the file itself
|
// don't write the file itself
|
||||||
return false, nil
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// if a file was deleted and we are using overlay, we need to create a character device
|
// if a file was deleted and we are using overlay, we need to create a character device
|
||||||
|
@ -76,7 +77,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
|
||||||
originalBase := base[len(WhiteoutPrefix):]
|
originalBase := base[len(WhiteoutPrefix):]
|
||||||
originalPath := filepath.Join(dir, originalBase)
|
originalPath := filepath.Join(dir, originalBase)
|
||||||
|
|
||||||
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
|
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
|
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
|
||||||
|
|
54
vendor/github.com/containers/storage/pkg/archive/archive_unix.go
generated
vendored
54
vendor/github.com/containers/storage/pkg/archive/archive_unix.go
generated
vendored
|
@ -9,7 +9,10 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// fixVolumePathPrefix does platform specific processing to ensure that if
|
// fixVolumePathPrefix does platform specific processing to ensure that if
|
||||||
|
@ -40,33 +43,38 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||||
return perm // noop for unix as golang APIs provide perm bits correctly
|
return perm // noop for unix as golang APIs provide perm bits correctly
|
||||||
}
|
}
|
||||||
|
|
||||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
|
||||||
s, ok := stat.(*syscall.Stat_t)
|
s, ok := stat.(*syscall.Stat_t)
|
||||||
|
|
||||||
if !ok {
|
if ok {
|
||||||
err = errors.New("cannot convert stat value to syscall.Stat_t")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
inode = uint64(s.Ino)
|
|
||||||
|
|
||||||
// Currently go does not fill in the major/minors
|
// Currently go does not fill in the major/minors
|
||||||
if s.Mode&syscall.S_IFBLK != 0 ||
|
if s.Mode&unix.S_IFBLK != 0 ||
|
||||||
s.Mode&syscall.S_IFCHR != 0 {
|
s.Mode&unix.S_IFCHR != 0 {
|
||||||
hdr.Devmajor = int64(major(uint64(s.Rdev)))
|
hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
|
||||||
hdr.Devminor = int64(minor(uint64(s.Rdev)))
|
hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
|
||||||
|
s, ok := stat.(*syscall.Stat_t)
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
inode = s.Ino
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
|
||||||
s, ok := stat.(*syscall.Stat_t)
|
s, ok := stat.(*syscall.Stat_t)
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
|
return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
|
||||||
}
|
}
|
||||||
return int(s.Uid), int(s.Gid), nil
|
return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func major(device uint64) uint64 {
|
func major(device uint64) uint64 {
|
||||||
|
@ -80,20 +88,22 @@ func minor(device uint64) uint64 {
|
||||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||||
|
if rsystem.RunningInUserNS() {
|
||||||
|
// cannot create a device if running in user namespace
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
mode := uint32(hdr.Mode & 07777)
|
mode := uint32(hdr.Mode & 07777)
|
||||||
switch hdr.Typeflag {
|
switch hdr.Typeflag {
|
||||||
case tar.TypeBlock:
|
case tar.TypeBlock:
|
||||||
mode |= syscall.S_IFBLK
|
mode |= unix.S_IFBLK
|
||||||
case tar.TypeChar:
|
case tar.TypeChar:
|
||||||
mode |= syscall.S_IFCHR
|
mode |= unix.S_IFCHR
|
||||||
case tar.TypeFifo:
|
case tar.TypeFifo:
|
||||||
mode |= syscall.S_IFIFO
|
mode |= unix.S_IFIFO
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||||
|
|
23
vendor/github.com/containers/storage/pkg/archive/archive_windows.go
generated
vendored
23
vendor/github.com/containers/storage/pkg/archive/archive_windows.go
generated
vendored
|
@ -9,6 +9,7 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/pkg/longpath"
|
"github.com/containers/storage/pkg/longpath"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -42,15 +43,23 @@ func CanonicalTarNameForPath(p string) (string, error) {
|
||||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||||
// on the platform the archival is done.
|
// on the platform the archival is done.
|
||||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||||
perm &= 0755
|
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
|
||||||
|
permPart := perm & os.ModePerm
|
||||||
|
noPermPart := perm &^ os.ModePerm
|
||||||
// Add the x bit: make everything +x from windows
|
// Add the x bit: make everything +x from windows
|
||||||
perm |= 0111
|
permPart |= 0111
|
||||||
|
permPart &= 0755
|
||||||
|
|
||||||
return perm
|
return noPermPart | permPart
|
||||||
}
|
}
|
||||||
|
|
||||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
|
||||||
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
// do nothing. no notion of Rdev, Nlink in stat on Windows
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
|
||||||
|
// do nothing. no notion of Inode in stat on Windows
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -64,7 +73,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
|
||||||
// no notion of file ownership mapping yet on Windows
|
// no notion of file ownership mapping yet on Windows
|
||||||
return 0, 0, nil
|
return idtools.IDPair{0, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
15
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
15
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
|
@ -267,7 +267,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for name, newChild := range info.children {
|
for name, newChild := range info.children {
|
||||||
oldChild, _ := oldChildren[name]
|
oldChild := oldChildren[name]
|
||||||
if oldChild != nil {
|
if oldChild != nil {
|
||||||
// change?
|
// change?
|
||||||
oldStat := oldChild.stat
|
oldStat := oldChild.stat
|
||||||
|
@ -279,7 +279,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||||
// breaks down is if some code intentionally hides a change by setting
|
// breaks down is if some code intentionally hides a change by setting
|
||||||
// back mtime
|
// back mtime
|
||||||
if statDifferent(oldStat, newStat) ||
|
if statDifferent(oldStat, newStat) ||
|
||||||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
|
!bytes.Equal(oldChild.capability, newChild.capability) {
|
||||||
change := Change{
|
change := Change{
|
||||||
Path: newChild.path(),
|
Path: newChild.path(),
|
||||||
Kind: ChangeModify,
|
Kind: ChangeModify,
|
||||||
|
@ -391,16 +391,11 @@ func ChangesSize(newDir string, changes []Change) int64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
||||||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
|
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
|
||||||
reader, writer := io.Pipe()
|
reader, writer := io.Pipe()
|
||||||
go func() {
|
go func() {
|
||||||
ta := &tarAppender{
|
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
|
||||||
TarWriter: tar.NewWriter(writer),
|
|
||||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
|
||||||
SeenFiles: make(map[uint64]string),
|
|
||||||
UIDMaps: uidMaps,
|
|
||||||
GIDMaps: gidMaps,
|
|
||||||
}
|
|
||||||
// this buffer is needed for the duration of this piped stream
|
// this buffer is needed for the duration of this piped stream
|
||||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||||
|
|
||||||
|
|
13
vendor/github.com/containers/storage/pkg/archive/changes_linux.go
generated
vendored
13
vendor/github.com/containers/storage/pkg/archive/changes_linux.go
generated
vendored
|
@ -10,6 +10,7 @@ import (
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||||
|
@ -65,7 +66,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||||
}
|
}
|
||||||
parent := root.LookUp(filepath.Dir(path))
|
parent := root.LookUp(filepath.Dir(path))
|
||||||
if parent == nil {
|
if parent == nil {
|
||||||
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
|
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
|
||||||
}
|
}
|
||||||
info := &FileInfo{
|
info := &FileInfo{
|
||||||
name: filepath.Base(path),
|
name: filepath.Base(path),
|
||||||
|
@ -233,7 +234,7 @@ func readdirnames(dirname string) (names []nameIno, err error) {
|
||||||
// Refill the buffer if necessary
|
// Refill the buffer if necessary
|
||||||
if bufp >= nbuf {
|
if bufp >= nbuf {
|
||||||
bufp = 0
|
bufp = 0
|
||||||
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
||||||
if nbuf < 0 {
|
if nbuf < 0 {
|
||||||
nbuf = 0
|
nbuf = 0
|
||||||
}
|
}
|
||||||
|
@ -255,12 +256,12 @@ func readdirnames(dirname string) (names []nameIno, err error) {
|
||||||
return sl, nil
|
return sl, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
|
// parseDirent is a minor modification of unix.ParseDirent (linux version)
|
||||||
// which returns {name,inode} pairs instead of just names.
|
// which returns {name,inode} pairs instead of just names.
|
||||||
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
||||||
origlen := len(buf)
|
origlen := len(buf)
|
||||||
for len(buf) > 0 {
|
for len(buf) > 0 {
|
||||||
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
|
||||||
buf = buf[dirent.Reclen:]
|
buf = buf[dirent.Reclen:]
|
||||||
if dirent.Ino == 0 { // File absent in directory.
|
if dirent.Ino == 0 { // File absent in directory.
|
||||||
continue
|
continue
|
||||||
|
@ -293,7 +294,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) {
|
||||||
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
||||||
if fi.Mode()&os.ModeCharDevice != 0 {
|
if fi.Mode()&os.ModeCharDevice != 0 {
|
||||||
s := fi.Sys().(*syscall.Stat_t)
|
s := fi.Sys().(*syscall.Stat_t)
|
||||||
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
|
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
|
||||||
return path, nil
|
return path, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -302,7 +303,7 @@ func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
|
if len(opaque) == 1 && opaque[0] == 'y' {
|
||||||
return path, nil
|
return path, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
7
vendor/github.com/containers/storage/pkg/archive/changes_unix.go
generated
vendored
7
vendor/github.com/containers/storage/pkg/archive/changes_unix.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
|
@ -16,7 +17,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
oldStat.GID() != newStat.GID() ||
|
oldStat.GID() != newStat.GID() ||
|
||||||
oldStat.Rdev() != newStat.Rdev() ||
|
oldStat.Rdev() != newStat.Rdev() ||
|
||||||
// Don't look at size for dirs, its not a good measure of change
|
// Don't look at size for dirs, its not a good measure of change
|
||||||
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
|
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
|
||||||
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
|
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -24,11 +25,11 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (info *FileInfo) isDir() bool {
|
func (info *FileInfo) isDir() bool {
|
||||||
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
|
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIno(fi os.FileInfo) uint64 {
|
func getIno(fi os.FileInfo) uint64 {
|
||||||
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
|
return fi.Sys().(*syscall.Stat_t).Ino
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasHardlinks(fi os.FileInfo) bool {
|
func hasHardlinks(fi os.FileInfo) bool {
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/archive/changes_windows.go
generated
vendored
6
vendor/github.com/containers/storage/pkg/archive/changes_windows.go
generated
vendored
|
@ -9,16 +9,16 @@ import (
|
||||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
|
|
||||||
// Don't look at size for dirs, its not a good measure of change
|
// Don't look at size for dirs, its not a good measure of change
|
||||||
if oldStat.ModTime() != newStat.ModTime() ||
|
if oldStat.Mtim() != newStat.Mtim() ||
|
||||||
oldStat.Mode() != newStat.Mode() ||
|
oldStat.Mode() != newStat.Mode() ||
|
||||||
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
|
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (info *FileInfo) isDir() bool {
|
func (info *FileInfo) isDir() bool {
|
||||||
return info.parent == nil || info.stat.IsDir()
|
return info.parent == nil || info.stat.Mode().IsDir()
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIno(fi os.FileInfo) (inode uint64) {
|
func getIno(fi os.FileInfo) (inode uint64) {
|
||||||
|
|
15
vendor/github.com/containers/storage/pkg/archive/copy.go
generated
vendored
15
vendor/github.com/containers/storage/pkg/archive/copy.go
generated
vendored
|
@ -88,13 +88,13 @@ func SplitPathDirEntry(path string) (dir, base string) {
|
||||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||||
// requires a directory as the source path. TarResource accepts either a
|
// requires a directory as the source path. TarResource accepts either a
|
||||||
// directory or a file path and correctly sets the Tar options.
|
// directory or a file path and correctly sets the Tar options.
|
||||||
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
|
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
|
||||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TarResourceRebase is like TarResource but renames the first path element of
|
// TarResourceRebase is like TarResource but renames the first path element of
|
||||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||||
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
|
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
|
||||||
sourcePath = normalizePath(sourcePath)
|
sourcePath = normalizePath(sourcePath)
|
||||||
if _, err = os.Lstat(sourcePath); err != nil {
|
if _, err = os.Lstat(sourcePath); err != nil {
|
||||||
// Catches the case where the source does not exist or is not a
|
// Catches the case where the source does not exist or is not a
|
||||||
|
@ -103,7 +103,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err erro
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Separate the source path between it's directory and
|
// Separate the source path between its directory and
|
||||||
// the entry in that directory which we are archiving.
|
// the entry in that directory which we are archiving.
|
||||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||||
|
|
||||||
|
@ -241,7 +241,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
||||||
// contain the archived resource described by srcInfo, to the destination
|
// contain the archived resource described by srcInfo, to the destination
|
||||||
// described by dstInfo. Returns the possibly modified content archive along
|
// described by dstInfo. Returns the possibly modified content archive along
|
||||||
// with the path to the destination directory which it should be extracted to.
|
// with the path to the destination directory which it should be extracted to.
|
||||||
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
|
||||||
// Ensure in platform semantics
|
// Ensure in platform semantics
|
||||||
srcInfo.Path = normalizePath(srcInfo.Path)
|
srcInfo.Path = normalizePath(srcInfo.Path)
|
||||||
dstInfo.Path = normalizePath(dstInfo.Path)
|
dstInfo.Path = normalizePath(dstInfo.Path)
|
||||||
|
@ -304,7 +304,7 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st
|
||||||
|
|
||||||
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||||
// an occurrence of oldBase with newBase at the beginning of entry names.
|
// an occurrence of oldBase with newBase at the beginning of entry names.
|
||||||
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
|
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
|
||||||
if oldBase == string(os.PathSeparator) {
|
if oldBase == string(os.PathSeparator) {
|
||||||
// If oldBase specifies the root directory, use an empty string as
|
// If oldBase specifies the root directory, use an empty string as
|
||||||
// oldBase instead so that newBase doesn't replace the path separator
|
// oldBase instead so that newBase doesn't replace the path separator
|
||||||
|
@ -332,6 +332,9 @@ func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
|
||||||
}
|
}
|
||||||
|
|
||||||
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
||||||
|
if hdr.Typeflag == tar.TypeLink {
|
||||||
|
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
|
||||||
|
}
|
||||||
|
|
||||||
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
||||||
w.CloseWithError(err)
|
w.CloseWithError(err)
|
||||||
|
@ -380,7 +383,7 @@ func CopyResource(srcPath, dstPath string, followLink bool) error {
|
||||||
|
|
||||||
// CopyTo handles extracting the given content whose
|
// CopyTo handles extracting the given content whose
|
||||||
// entries should be sourced from srcInfo to dstPath.
|
// entries should be sourced from srcInfo to dstPath.
|
||||||
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
|
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
|
||||||
// The destination path need not exist, but CopyInfoDestinationPath will
|
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||||
// ensure that at least the parent directory exists.
|
// ensure that at least the parent directory exists.
|
||||||
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||||
|
|
43
vendor/github.com/containers/storage/pkg/archive/diff.go
generated
vendored
43
vendor/github.com/containers/storage/pkg/archive/diff.go
generated
vendored
|
@ -19,7 +19,7 @@ import (
|
||||||
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
||||||
// compressed or uncompressed.
|
// compressed or uncompressed.
|
||||||
// Returns the size in bytes of the contents of the layer.
|
// Returns the size in bytes of the contents of the layer.
|
||||||
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
|
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
|
||||||
tr := tar.NewReader(layer)
|
tr := tar.NewReader(layer)
|
||||||
trBuf := pools.BufioReader32KPool.Get(tr)
|
trBuf := pools.BufioReader32KPool.Get(tr)
|
||||||
defer pools.BufioReader32KPool.Put(trBuf)
|
defer pools.BufioReader32KPool.Put(trBuf)
|
||||||
|
@ -33,17 +33,11 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
|
||||||
if options.ExcludePatterns == nil {
|
if options.ExcludePatterns == nil {
|
||||||
options.ExcludePatterns = []string{}
|
options.ExcludePatterns = []string{}
|
||||||
}
|
}
|
||||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
aufsTempdir := ""
|
aufsTempdir := ""
|
||||||
aufsHardlinks := make(map[string]*tar.Header)
|
aufsHardlinks := make(map[string]*tar.Header)
|
||||||
|
|
||||||
if options == nil {
|
|
||||||
options = &TarOptions{}
|
|
||||||
}
|
|
||||||
// Iterate through the files in the archive.
|
// Iterate through the files in the archive.
|
||||||
for {
|
for {
|
||||||
hdr, err := tr.Next()
|
hdr, err := tr.Next()
|
||||||
|
@ -90,7 +84,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
|
||||||
parentPath := filepath.Join(dest, parent)
|
parentPath := filepath.Join(dest, parent)
|
||||||
|
|
||||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||||
err = system.MkdirAll(parentPath, 0600)
|
err = system.MkdirAll(parentPath, 0600, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -111,7 +105,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
|
||||||
}
|
}
|
||||||
defer os.RemoveAll(aufsTempdir)
|
defer os.RemoveAll(aufsTempdir)
|
||||||
}
|
}
|
||||||
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
|
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -198,28 +192,11 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
|
||||||
srcData = tmpFile
|
srcData = tmpFile
|
||||||
}
|
}
|
||||||
|
|
||||||
// if the options contain a uid & gid maps, convert header uid/gid
|
if err := remapIDs(idMappings, srcHdr); err != nil {
|
||||||
// entries using the maps such that lchown sets the proper mapped
|
|
||||||
// uid/gid after writing the file. We only perform this mapping if
|
|
||||||
// the file isn't already owned by the remapped root UID or GID, as
|
|
||||||
// that specific uid/gid has no mapping from container -> host, and
|
|
||||||
// those files already have the proper ownership for inside the
|
|
||||||
// container.
|
|
||||||
if srcHdr.Uid != remappedRootUID {
|
|
||||||
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
srcHdr.Uid = xUID
|
|
||||||
}
|
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
|
||||||
if srcHdr.Gid != remappedRootGID {
|
|
||||||
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
srcHdr.Gid = xGID
|
|
||||||
}
|
|
||||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -246,7 +223,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er
|
||||||
// and applies it to the directory `dest`. The stream `layer` can be
|
// and applies it to the directory `dest`. The stream `layer` can be
|
||||||
// compressed or uncompressed.
|
// compressed or uncompressed.
|
||||||
// Returns the size in bytes of the contents of the layer.
|
// Returns the size in bytes of the contents of the layer.
|
||||||
func ApplyLayer(dest string, layer Reader) (int64, error) {
|
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
|
||||||
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -254,12 +231,12 @@ func ApplyLayer(dest string, layer Reader) (int64, error) {
|
||||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||||
// can only be uncompressed.
|
// can only be uncompressed.
|
||||||
// Returns the size in bytes of the contents of the layer.
|
// Returns the size in bytes of the contents of the layer.
|
||||||
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
|
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
|
||||||
return applyLayerHandler(dest, layer, options, false)
|
return applyLayerHandler(dest, layer, options, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||||
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
|
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||||
dest = filepath.Clean(dest)
|
dest = filepath.Clean(dest)
|
||||||
|
|
||||||
// We need to be able to set any perms
|
// We need to be able to set any perms
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/archive/wrap.go
generated
vendored
6
vendor/github.com/containers/storage/pkg/archive/wrap.go
generated
vendored
|
@ -3,7 +3,7 @@ package archive
|
||||||
import (
|
import (
|
||||||
"archive/tar"
|
"archive/tar"
|
||||||
"bytes"
|
"bytes"
|
||||||
"io/ioutil"
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Generate generates a new archive from the content provided
|
// Generate generates a new archive from the content provided
|
||||||
|
@ -22,7 +22,7 @@ import (
|
||||||
//
|
//
|
||||||
// FIXME: stream content instead of buffering
|
// FIXME: stream content instead of buffering
|
||||||
// FIXME: specify permissions and other archive metadata
|
// FIXME: specify permissions and other archive metadata
|
||||||
func Generate(input ...string) (Archive, error) {
|
func Generate(input ...string) (io.Reader, error) {
|
||||||
files := parseStringPairs(input...)
|
files := parseStringPairs(input...)
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
tw := tar.NewWriter(buf)
|
tw := tar.NewWriter(buf)
|
||||||
|
@ -42,7 +42,7 @@ func Generate(input ...string) (Archive, error) {
|
||||||
if err := tw.Close(); err != nil {
|
if err := tw.Close(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return ioutil.NopCloser(buf), nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseStringPairs(input ...string) (output [][2]string) {
|
func parseStringPairs(input ...string) (output [][2]string) {
|
||||||
|
|
47
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
47
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
|
@ -11,7 +11,13 @@ import (
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
)
|
)
|
||||||
|
|
||||||
var chrootArchiver = &archive.Archiver{Untar: Untar}
|
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
|
||||||
|
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
|
||||||
|
if idMappings == nil {
|
||||||
|
idMappings = &idtools.IDMappings{}
|
||||||
|
}
|
||||||
|
return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
|
||||||
|
}
|
||||||
|
|
||||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||||
// and unpacks it into the directory at `dest`.
|
// and unpacks it into the directory at `dest`.
|
||||||
|
@ -30,7 +36,6 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp
|
||||||
|
|
||||||
// Handler for teasing out the automatic decompression
|
// Handler for teasing out the automatic decompression
|
||||||
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
|
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
|
||||||
|
|
||||||
if tarArchive == nil {
|
if tarArchive == nil {
|
||||||
return fmt.Errorf("Empty archive")
|
return fmt.Errorf("Empty archive")
|
||||||
}
|
}
|
||||||
|
@ -41,14 +46,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
|
||||||
options.ExcludePatterns = []string{}
|
options.ExcludePatterns = []string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||||
if err != nil {
|
rootIDs := idMappings.RootPair()
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
dest = filepath.Clean(dest)
|
dest = filepath.Clean(dest)
|
||||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||||
if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil {
|
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -65,33 +68,3 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
|
||||||
|
|
||||||
return invokeUnpack(r, dest, options)
|
return invokeUnpack(r, dest, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
|
||||||
// If either Tar or Untar fails, TarUntar aborts and returns the error.
|
|
||||||
func TarUntar(src, dst string) error {
|
|
||||||
return chrootArchiver.TarUntar(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
|
||||||
// unpacks it at filesystem path `dst`.
|
|
||||||
// The archive is streamed directly with fixed buffering and no
|
|
||||||
// intermediary disk IO.
|
|
||||||
func CopyWithTar(src, dst string) error {
|
|
||||||
return chrootArchiver.CopyWithTar(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
|
||||||
// for a single file. It copies a regular file from path `src` to
|
|
||||||
// path `dst`, and preserves all its metadata.
|
|
||||||
//
|
|
||||||
// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
|
|
||||||
// destination path will be `dst/base(src)` or `dst\base(src)`
|
|
||||||
func CopyFileWithTar(src, dst string) (err error) {
|
|
||||||
return chrootArchiver.CopyFileWithTar(src, dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UntarPath is a convenience function which looks for an archive
|
|
||||||
// at filesystem path `src`, and unpacks it at `dst`.
|
|
||||||
func UntarPath(src, dst string) error {
|
|
||||||
return chrootArchiver.UntarPath(src, dst)
|
|
||||||
}
|
|
||||||
|
|
39
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
generated
vendored
39
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
generated
vendored
|
@ -5,9 +5,10 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/mount"
|
"github.com/containers/storage/pkg/mount"
|
||||||
|
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// chroot on linux uses pivot_root instead of chroot
|
// chroot on linux uses pivot_root instead of chroot
|
||||||
|
@ -17,14 +18,25 @@ import (
|
||||||
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
|
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
|
||||||
// This is similar to how libcontainer sets up a container's rootfs
|
// This is similar to how libcontainer sets up a container's rootfs
|
||||||
func chroot(path string) (err error) {
|
func chroot(path string) (err error) {
|
||||||
if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
|
// if the engine is running in a user namespace we need to use actual chroot
|
||||||
|
if rsystem.RunningInUserNS() {
|
||||||
|
return realChroot(path)
|
||||||
|
}
|
||||||
|
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
|
||||||
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
|
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mount.MakeRPrivate(path); err != nil {
|
// make everything in new ns private
|
||||||
|
if err := mount.MakeRPrivate("/"); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if mounted, _ := mount.Mounted(path); !mounted {
|
||||||
|
if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
|
||||||
|
return realChroot(path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// setup oldRoot for pivot_root
|
// setup oldRoot for pivot_root
|
||||||
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
|
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -35,7 +47,7 @@ func chroot(path string) (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if mounted {
|
if mounted {
|
||||||
// make sure pivotDir is not mounted before we try to remove it
|
// make sure pivotDir is not mounted before we try to remove it
|
||||||
if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
|
if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = errCleanup
|
err = errCleanup
|
||||||
}
|
}
|
||||||
|
@ -52,16 +64,9 @@ func chroot(path string) (err error) {
|
||||||
err = errCleanup
|
err = errCleanup
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil {
|
|
||||||
if err == nil {
|
|
||||||
err = fmt.Errorf("error unmounting root: %v", errCleanup)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := syscall.PivotRoot(path, pivotDir); err != nil {
|
if err := unix.PivotRoot(path, pivotDir); err != nil {
|
||||||
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
|
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
|
||||||
if err := os.Remove(pivotDir); err != nil {
|
if err := os.Remove(pivotDir); err != nil {
|
||||||
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
|
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
|
||||||
|
@ -74,17 +79,17 @@ func chroot(path string) (err error) {
|
||||||
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
|
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
|
||||||
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
|
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
|
||||||
|
|
||||||
if err := syscall.Chdir("/"); err != nil {
|
if err := unix.Chdir("/"); err != nil {
|
||||||
return fmt.Errorf("Error changing to new root: %v", err)
|
return fmt.Errorf("Error changing to new root: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
|
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
|
||||||
if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
|
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
|
||||||
return fmt.Errorf("Error making old root private after pivot: %v", err)
|
return fmt.Errorf("Error making old root private after pivot: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now unmount the old root so it's no longer visible from the new root
|
// Now unmount the old root so it's no longer visible from the new root
|
||||||
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
|
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
|
||||||
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
|
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
|
||||||
}
|
}
|
||||||
mounted = false
|
mounted = false
|
||||||
|
@ -93,10 +98,10 @@ func chroot(path string) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func realChroot(path string) error {
|
func realChroot(path string) error {
|
||||||
if err := syscall.Chroot(path); err != nil {
|
if err := unix.Chroot(path); err != nil {
|
||||||
return fmt.Errorf("Error after fallback to chroot: %v", err)
|
return fmt.Errorf("Error after fallback to chroot: %v", err)
|
||||||
}
|
}
|
||||||
if err := syscall.Chdir("/"); err != nil {
|
if err := unix.Chdir("/"); err != nil {
|
||||||
return fmt.Errorf("Error changing to new root after chroot: %v", err)
|
return fmt.Errorf("Error changing to new root after chroot: %v", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
generated
vendored
6
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
generated
vendored
|
@ -2,11 +2,11 @@
|
||||||
|
|
||||||
package chrootarchive
|
package chrootarchive
|
||||||
|
|
||||||
import "syscall"
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
func chroot(path string) error {
|
func chroot(path string) error {
|
||||||
if err := syscall.Chroot(path); err != nil {
|
if err := unix.Chroot(path); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return syscall.Chdir("/")
|
return unix.Chdir("/")
|
||||||
}
|
}
|
||||||
|
|
10
vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
generated
vendored
10
vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
generated
vendored
|
@ -1,12 +1,16 @@
|
||||||
package chrootarchive
|
package chrootarchive
|
||||||
|
|
||||||
import "github.com/containers/storage/pkg/archive"
|
import (
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/archive"
|
||||||
|
)
|
||||||
|
|
||||||
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||||
// and applies it to the directory `dest`. The stream `layer` can only be
|
// and applies it to the directory `dest`. The stream `layer` can only be
|
||||||
// uncompressed.
|
// uncompressed.
|
||||||
// Returns the size in bytes of the contents of the layer.
|
// Returns the size in bytes of the contents of the layer.
|
||||||
func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
|
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
|
||||||
return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
|
return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -14,6 +18,6 @@ func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
|
||||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||||
// can only be uncompressed.
|
// can only be uncompressed.
|
||||||
// Returns the size in bytes of the contents of the layer.
|
// Returns the size in bytes of the contents of the layer.
|
||||||
func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
|
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
|
||||||
return applyLayerHandler(dest, layer, options, false)
|
return applyLayerHandler(dest, layer, options, false)
|
||||||
}
|
}
|
||||||
|
|
14
vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
generated
vendored
14
vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
@ -15,6 +16,7 @@ import (
|
||||||
"github.com/containers/storage/pkg/archive"
|
"github.com/containers/storage/pkg/archive"
|
||||||
"github.com/containers/storage/pkg/reexec"
|
"github.com/containers/storage/pkg/reexec"
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
type applyLayerResponse struct {
|
type applyLayerResponse struct {
|
||||||
|
@ -27,13 +29,14 @@ type applyLayerResponse struct {
|
||||||
func applyLayer() {
|
func applyLayer() {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
tmpDir = ""
|
tmpDir string
|
||||||
err error
|
err error
|
||||||
options *archive.TarOptions
|
options *archive.TarOptions
|
||||||
)
|
)
|
||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
|
inUserns := rsystem.RunningInUserNS()
|
||||||
if err := chroot(flag.Arg(0)); err != nil {
|
if err := chroot(flag.Arg(0)); err != nil {
|
||||||
fatal(err)
|
fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -49,6 +52,10 @@ func applyLayer() {
|
||||||
fatal(err)
|
fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if inUserns {
|
||||||
|
options.InUserNS = true
|
||||||
|
}
|
||||||
|
|
||||||
if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
|
if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
|
||||||
fatal(err)
|
fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -75,7 +82,7 @@ func applyLayer() {
|
||||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
||||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||||
// contents of the layer.
|
// contents of the layer.
|
||||||
func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
||||||
dest = filepath.Clean(dest)
|
dest = filepath.Clean(dest)
|
||||||
if decompress {
|
if decompress {
|
||||||
decompressed, err := archive.DecompressStream(layer)
|
decompressed, err := archive.DecompressStream(layer)
|
||||||
|
@ -88,6 +95,9 @@ func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOp
|
||||||
}
|
}
|
||||||
if options == nil {
|
if options == nil {
|
||||||
options = &archive.TarOptions{}
|
options = &archive.TarOptions{}
|
||||||
|
if rsystem.RunningInUserNS() {
|
||||||
|
options.InUserNS = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if options.ExcludePatterns == nil {
|
if options.ExcludePatterns == nil {
|
||||||
options.ExcludePatterns = []string{}
|
options.ExcludePatterns = []string{}
|
||||||
|
|
5
vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
generated
vendored
|
@ -2,6 +2,7 @@ package chrootarchive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
@ -13,7 +14,7 @@ import (
|
||||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
||||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||||
// contents of the layer.
|
// contents of the layer.
|
||||||
func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
||||||
dest = filepath.Clean(dest)
|
dest = filepath.Clean(dest)
|
||||||
|
|
||||||
// Ensure it is a Windows-style volume path
|
// Ensure it is a Windows-style volume path
|
||||||
|
@ -37,7 +38,7 @@ func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOp
|
||||||
s, err := archive.UnpackLayer(dest, layer, nil)
|
s, err := archive.UnpackLayer(dest, layer, nil)
|
||||||
os.RemoveAll(tmpDir)
|
os.RemoveAll(tmpDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
|
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
|
|
146
vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
generated
vendored
146
vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build linux
|
// +build linux,cgo
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
|
@ -7,17 +7,14 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DevmapperLogger defines methods for logging with devicemapper.
|
// Same as DM_DEVICE_* enum values from libdevmapper.h
|
||||||
type DevmapperLogger interface {
|
// nolint: deadcode
|
||||||
DMLog(level int, file string, line int, dmError int, message string)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
deviceCreate TaskType = iota
|
deviceCreate TaskType = iota
|
||||||
deviceReload
|
deviceReload
|
||||||
|
@ -155,6 +152,7 @@ func (t *Task) run() error {
|
||||||
if res := DmTaskRun(t.unmanaged); res != 1 {
|
if res := DmTaskRun(t.unmanaged); res != 1 {
|
||||||
return ErrTaskRun
|
return ErrTaskRun
|
||||||
}
|
}
|
||||||
|
runtime.KeepAlive(t)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -257,25 +255,12 @@ func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start
|
||||||
// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
|
// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
|
||||||
func UdevWait(cookie *uint) error {
|
func UdevWait(cookie *uint) error {
|
||||||
if res := DmUdevWait(*cookie); res != 1 {
|
if res := DmUdevWait(*cookie); res != 1 {
|
||||||
logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie)
|
logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
|
||||||
return ErrUdevWait
|
return ErrUdevWait
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library.
|
|
||||||
func LogInitVerbose(level int) {
|
|
||||||
DmLogInitVerbose(level)
|
|
||||||
}
|
|
||||||
|
|
||||||
var dmLogger DevmapperLogger
|
|
||||||
|
|
||||||
// LogInit initializes the logger for the device mapper library.
|
|
||||||
func LogInit(logger DevmapperLogger) {
|
|
||||||
dmLogger = logger
|
|
||||||
LogWithErrnoInit()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
|
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
|
||||||
func SetDevDir(dir string) error {
|
func SetDevDir(dir string) error {
|
||||||
if res := DmSetDevDir(dir); res != 1 {
|
if res := DmSetDevDir(dir); res != 1 {
|
||||||
|
@ -328,17 +313,21 @@ func RemoveDevice(name string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var cookie uint
|
cookie := new(uint)
|
||||||
if err := task.setCookie(&cookie, 0); err != nil {
|
if err := task.setCookie(cookie, 0); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
|
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
|
||||||
}
|
}
|
||||||
defer UdevWait(&cookie)
|
defer UdevWait(cookie)
|
||||||
|
|
||||||
dmSawBusy = false // reset before the task is run
|
dmSawBusy = false // reset before the task is run
|
||||||
|
dmSawEnxio = false
|
||||||
if err = task.run(); err != nil {
|
if err = task.run(); err != nil {
|
||||||
if dmSawBusy {
|
if dmSawBusy {
|
||||||
return ErrBusy
|
return ErrBusy
|
||||||
}
|
}
|
||||||
|
if dmSawEnxio {
|
||||||
|
return ErrEnxio
|
||||||
|
}
|
||||||
return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
|
return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -358,7 +347,32 @@ func RemoveDeviceDeferred(name string) error {
|
||||||
return ErrTaskDeferredRemove
|
return ErrTaskDeferredRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// set a task cookie and disable library fallback, or else libdevmapper will
|
||||||
|
// disable udev dm rules and delete the symlink under /dev/mapper by itself,
|
||||||
|
// even if the removal is deferred by the kernel.
|
||||||
|
cookie := new(uint)
|
||||||
|
var flags uint16
|
||||||
|
flags = DmUdevDisableLibraryFallback
|
||||||
|
if err := task.setCookie(cookie, flags); err != nil {
|
||||||
|
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// libdevmapper and udev relies on System V semaphore for synchronization,
|
||||||
|
// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
|
||||||
|
// So these two function call must come in pairs, otherwise semaphores will
|
||||||
|
// be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem`
|
||||||
|
// will be reached, which will eventually make all following calls to 'task.SetCookie'
|
||||||
|
// fail.
|
||||||
|
// this call will not wait for the deferred removal's final executing, since no
|
||||||
|
// udev event will be generated, and the semaphore's value will not be incremented
|
||||||
|
// by udev, what UdevWait is just cleaning up the semaphore.
|
||||||
|
defer UdevWait(cookie)
|
||||||
|
|
||||||
|
dmSawEnxio = false
|
||||||
if err = task.run(); err != nil {
|
if err = task.run(); err != nil {
|
||||||
|
if dmSawEnxio {
|
||||||
|
return ErrEnxio
|
||||||
|
}
|
||||||
return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
|
return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -427,7 +441,7 @@ func BlockDeviceDiscard(path string) error {
|
||||||
|
|
||||||
// Without this sometimes the remove of the device that happens after
|
// Without this sometimes the remove of the device that happens after
|
||||||
// discard fails with EBUSY.
|
// discard fails with EBUSY.
|
||||||
syscall.Sync()
|
unix.Sync()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -450,13 +464,13 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
|
||||||
return fmt.Errorf("devicemapper: Can't add target %s", err)
|
return fmt.Errorf("devicemapper: Can't add target %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var cookie uint
|
cookie := new(uint)
|
||||||
var flags uint16
|
var flags uint16
|
||||||
flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
|
flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
|
||||||
if err := task.setCookie(&cookie, flags); err != nil {
|
if err := task.setCookie(cookie, flags); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||||
}
|
}
|
||||||
defer UdevWait(&cookie)
|
defer UdevWait(cookie)
|
||||||
|
|
||||||
if err := task.run(); err != nil {
|
if err := task.run(); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
|
return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
|
||||||
|
@ -484,7 +498,7 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := task.run(); err != nil {
|
if err := task.run(); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Error running deviceCreate %s", err)
|
return fmt.Errorf("devicemapper: Error running ReloadPool %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -638,11 +652,11 @@ func ResumeDevice(name string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var cookie uint
|
cookie := new(uint)
|
||||||
if err := task.setCookie(&cookie, 0); err != nil {
|
if err := task.setCookie(cookie, 0); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||||
}
|
}
|
||||||
defer UdevWait(&cookie)
|
defer UdevWait(cookie)
|
||||||
|
|
||||||
if err := task.run(); err != nil {
|
if err := task.run(); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
|
return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
|
||||||
|
@ -736,12 +750,12 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext
|
||||||
return fmt.Errorf("devicemapper: Can't add node %s", err)
|
return fmt.Errorf("devicemapper: Can't add node %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var cookie uint
|
cookie := new(uint)
|
||||||
if err := task.setCookie(&cookie, 0); err != nil {
|
if err := task.setCookie(cookie, 0); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer UdevWait(&cookie)
|
defer UdevWait(cookie)
|
||||||
|
|
||||||
if err := task.run(); err != nil {
|
if err := task.run(); err != nil {
|
||||||
return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
|
return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
|
||||||
|
@ -750,6 +764,33 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
|
||||||
|
func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
|
||||||
|
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
||||||
|
if task == nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := task.setSector(0); err != nil {
|
||||||
|
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
|
||||||
|
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dmSawExist = false // reset before the task is run
|
||||||
|
if err := task.run(); err != nil {
|
||||||
|
// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
|
||||||
|
if dmSawExist {
|
||||||
|
return ErrDeviceIDExists
|
||||||
|
}
|
||||||
|
return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId,
|
// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId,
|
||||||
func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
|
func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
|
||||||
devinfo, _ := GetInfo(baseName)
|
devinfo, _ := GetInfo(baseName)
|
||||||
|
@ -761,42 +802,15 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
|
||||||
if task == nil {
|
|
||||||
if doSuspend {
|
if doSuspend {
|
||||||
ResumeDevice(baseName)
|
if err2 := ResumeDevice(baseName); err2 != nil {
|
||||||
|
return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := task.setSector(0); err != nil {
|
|
||||||
if doSuspend {
|
|
||||||
ResumeDevice(baseName)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
|
|
||||||
if doSuspend {
|
|
||||||
ResumeDevice(baseName)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dmSawExist = false // reset before the task is run
|
|
||||||
if err := task.run(); err != nil {
|
|
||||||
if doSuspend {
|
|
||||||
ResumeDevice(baseName)
|
|
||||||
}
|
|
||||||
// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
|
|
||||||
if dmSawExist {
|
|
||||||
return ErrDeviceIDExists
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if doSuspend {
|
if doSuspend {
|
||||||
if err := ResumeDevice(baseName); err != nil {
|
if err := ResumeDevice(baseName); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
94
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
generated
vendored
94
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
generated
vendored
|
@ -1,21 +1,49 @@
|
||||||
// +build linux
|
// +build linux,cgo
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
import "C"
|
import "C"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DevmapperLogger defines methods required to register as a callback for
|
||||||
|
// logging events recieved from devicemapper. Note that devicemapper will send
|
||||||
|
// *all* logs regardless to callbacks (including debug logs) so it's
|
||||||
|
// recommended to not spam the console with the outputs.
|
||||||
|
type DevmapperLogger interface {
|
||||||
|
// DMLog is the logging callback containing all of the information from
|
||||||
|
// devicemapper. The interface is identical to the C libdm counterpart.
|
||||||
|
DMLog(level int, file string, line int, dmError int, message string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dmLogger is the current logger in use that is being forwarded our messages.
|
||||||
|
var dmLogger DevmapperLogger
|
||||||
|
|
||||||
|
// LogInit changes the logging callback called after processing libdm logs for
|
||||||
|
// error message information. The default logger simply forwards all logs to
|
||||||
|
// logrus. Calling LogInit(nil) disables the calling of callbacks.
|
||||||
|
func LogInit(logger DevmapperLogger) {
|
||||||
|
dmLogger = logger
|
||||||
|
}
|
||||||
|
|
||||||
// Due to the way cgo works this has to be in a separate file, as devmapper.go has
|
// Due to the way cgo works this has to be in a separate file, as devmapper.go has
|
||||||
// definitions in the cgo block, which is incompatible with using "//export"
|
// definitions in the cgo block, which is incompatible with using "//export"
|
||||||
|
|
||||||
// StorageDevmapperLogCallback exports the devmapper log callback for cgo.
|
// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
|
||||||
|
// because we are using callbacks, this function will be called for *every* log
|
||||||
|
// in libdm (even debug ones because there's no way of setting the verbosity
|
||||||
|
// level for an external logging callback).
|
||||||
//export StorageDevmapperLogCallback
|
//export StorageDevmapperLogCallback
|
||||||
func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) {
|
func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
|
||||||
msg := C.GoString(message)
|
msg := C.GoString(message)
|
||||||
if level < 7 {
|
|
||||||
|
// Track what errno libdm saw, because the library only gives us 0 or 1.
|
||||||
|
if level < LogLevelDebug {
|
||||||
if strings.Contains(msg, "busy") {
|
if strings.Contains(msg, "busy") {
|
||||||
dmSawBusy = true
|
dmSawBusy = true
|
||||||
}
|
}
|
||||||
|
@ -33,3 +61,61 @@ func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoO
|
||||||
dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
|
dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
|
||||||
|
// all logs that are of higher or equal priority to the given level to the
|
||||||
|
// corresponding logrus level.
|
||||||
|
type DefaultLogger struct {
|
||||||
|
// Level corresponds to the highest libdm level that will be forwarded to
|
||||||
|
// logrus. In order to change this, register a new DefaultLogger.
|
||||||
|
Level int
|
||||||
|
}
|
||||||
|
|
||||||
|
// DMLog is the logging callback containing all of the information from
|
||||||
|
// devicemapper. The interface is identical to the C libdm counterpart.
|
||||||
|
func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
|
||||||
|
if level <= l.Level {
|
||||||
|
// Forward the log to the correct logrus level, if allowed by dmLogLevel.
|
||||||
|
logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
||||||
|
switch level {
|
||||||
|
case LogLevelFatal, LogLevelErr:
|
||||||
|
logrus.Error(logMsg)
|
||||||
|
case LogLevelWarn:
|
||||||
|
logrus.Warn(logMsg)
|
||||||
|
case LogLevelNotice, LogLevelInfo:
|
||||||
|
logrus.Info(logMsg)
|
||||||
|
case LogLevelDebug:
|
||||||
|
logrus.Debug(logMsg)
|
||||||
|
default:
|
||||||
|
// Don't drop any "unknown" levels.
|
||||||
|
logrus.Info(logMsg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerLogCallback registers our own logging callback function for libdm
|
||||||
|
// (which is StorageDevmapperLogCallback).
|
||||||
|
//
|
||||||
|
// Because libdm only gives us {0,1} error codes we need to parse the logs
|
||||||
|
// produced by libdm (to set dmSawBusy and so on). Note that by registering a
|
||||||
|
// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
|
||||||
|
// stderr so we have to log everything ourselves. None of this handling is
|
||||||
|
// optional because we depend on log callbacks to parse the logs, and if we
|
||||||
|
// don't forward the log information we'll be in a lot of trouble when
|
||||||
|
// debugging things.
|
||||||
|
func registerLogCallback() {
|
||||||
|
LogWithErrnoInit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Use the default logger by default. We only allow LogLevelFatal by
|
||||||
|
// default, because internally we mask a lot of libdm errors by retrying
|
||||||
|
// and similar tricks. Also, libdm is very chatty and we don't want to
|
||||||
|
// worry users for no reason.
|
||||||
|
dmLogger = DefaultLogger{
|
||||||
|
Level: LogLevelFatal,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register as early as possible so we don't miss anything.
|
||||||
|
registerLogCallback()
|
||||||
|
}
|
||||||
|
|
19
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
generated
vendored
19
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
generated
vendored
|
@ -1,9 +1,9 @@
|
||||||
// +build linux
|
// +build linux,cgo
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
/*
|
/*
|
||||||
#cgo LDFLAGS: -L. -ldevmapper
|
#define _GNU_SOURCE
|
||||||
#include <libdevmapper.h>
|
#include <libdevmapper.h>
|
||||||
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
|
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
|
||||||
|
|
||||||
|
@ -12,14 +12,20 @@ extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_
|
||||||
|
|
||||||
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
|
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
|
||||||
{
|
{
|
||||||
char buffer[256];
|
char *buffer = NULL;
|
||||||
va_list ap;
|
va_list ap;
|
||||||
|
int ret;
|
||||||
|
|
||||||
va_start(ap, f);
|
va_start(ap, f);
|
||||||
vsnprintf(buffer, 256, f, ap);
|
ret = vasprintf(&buffer, f, ap);
|
||||||
va_end(ap);
|
va_end(ap);
|
||||||
|
if (ret < 0) {
|
||||||
|
// memory allocation failed -- should never happen?
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
|
StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
|
||||||
|
free(buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void log_with_errno_init()
|
static void log_with_errno_init()
|
||||||
|
@ -56,7 +62,6 @@ const (
|
||||||
var (
|
var (
|
||||||
DmGetLibraryVersion = dmGetLibraryVersionFct
|
DmGetLibraryVersion = dmGetLibraryVersionFct
|
||||||
DmGetNextTarget = dmGetNextTargetFct
|
DmGetNextTarget = dmGetNextTargetFct
|
||||||
DmLogInitVerbose = dmLogInitVerboseFct
|
|
||||||
DmSetDevDir = dmSetDevDirFct
|
DmSetDevDir = dmSetDevDirFct
|
||||||
DmTaskAddTarget = dmTaskAddTargetFct
|
DmTaskAddTarget = dmTaskAddTargetFct
|
||||||
DmTaskCreate = dmTaskCreateFct
|
DmTaskCreate = dmTaskCreateFct
|
||||||
|
@ -226,10 +231,6 @@ func dmCookieSupportedFct() int {
|
||||||
return int(C.dm_cookie_supported())
|
return int(C.dm_cookie_supported())
|
||||||
}
|
}
|
||||||
|
|
||||||
func dmLogInitVerboseFct(level int) {
|
|
||||||
C.dm_log_init_verbose(C.int(level))
|
|
||||||
}
|
|
||||||
|
|
||||||
func logWithErrnoInitFct() {
|
func logWithErrnoInitFct() {
|
||||||
C.log_with_errno_init()
|
C.log_with_errno_init()
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,14 +1,11 @@
|
||||||
// +build linux,!libdm_no_deferred_remove
|
// +build linux,cgo,!libdm_no_deferred_remove
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
/*
|
// #include <libdevmapper.h>
|
||||||
#cgo LDFLAGS: -L. -ldevmapper
|
|
||||||
#include <libdevmapper.h>
|
|
||||||
*/
|
|
||||||
import "C"
|
import "C"
|
||||||
|
|
||||||
// LibraryDeferredRemovalSupport is supported when statically linked.
|
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||||
const LibraryDeferredRemovalSupport = true
|
const LibraryDeferredRemovalSupport = true
|
||||||
|
|
||||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
generated
vendored
Normal file
6
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
// +build linux,cgo,!static_build
|
||||||
|
|
||||||
|
package devicemapper
|
||||||
|
|
||||||
|
// #cgo pkg-config: devmapper
|
||||||
|
import "C"
|
|
@ -1,8 +1,8 @@
|
||||||
// +build linux,libdm_no_deferred_remove
|
// +build linux,cgo,libdm_no_deferred_remove
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
// LibraryDeferredRemovalsupport is not supported when statically linked.
|
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||||
const LibraryDeferredRemovalSupport = false
|
const LibraryDeferredRemovalSupport = false
|
||||||
|
|
||||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
generated
vendored
Normal file
6
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
// +build linux,cgo,static_build
|
||||||
|
|
||||||
|
package devicemapper
|
||||||
|
|
||||||
|
// #cgo pkg-config: --static devmapper
|
||||||
|
import "C"
|
9
vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
generated
vendored
9
vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
generated
vendored
|
@ -1,15 +1,16 @@
|
||||||
// +build linux
|
// +build linux,cgo
|
||||||
|
|
||||||
package devicemapper
|
package devicemapper
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
|
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
|
||||||
var size int64
|
var size int64
|
||||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
|
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return size, nil
|
return size, nil
|
||||||
|
@ -20,7 +21,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
|
||||||
r[0] = offset
|
r[0] = offset
|
||||||
r[1] = length
|
r[1] = length
|
||||||
|
|
||||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
|
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
211
vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
generated
vendored
211
vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
generated
vendored
|
@ -13,98 +13,74 @@ import (
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// exclusion returns true if the specified pattern is an exclusion
|
// PatternMatcher allows checking paths agaist a list of patterns
|
||||||
func exclusion(pattern string) bool {
|
type PatternMatcher struct {
|
||||||
return pattern[0] == '!'
|
patterns []*Pattern
|
||||||
|
exclusions bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// empty returns true if the specified pattern is empty
|
// NewPatternMatcher creates a new matcher object for specific patterns that can
|
||||||
func empty(pattern string) bool {
|
// be used later to match against patterns against paths
|
||||||
return pattern == ""
|
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
|
||||||
|
pm := &PatternMatcher{
|
||||||
|
patterns: make([]*Pattern, 0, len(patterns)),
|
||||||
}
|
}
|
||||||
|
for _, p := range patterns {
|
||||||
// CleanPatterns takes a slice of patterns returns a new
|
|
||||||
// slice of patterns cleaned with filepath.Clean, stripped
|
|
||||||
// of any empty patterns and lets the caller know whether the
|
|
||||||
// slice contains any exception patterns (prefixed with !).
|
|
||||||
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
|
|
||||||
// Loop over exclusion patterns and:
|
|
||||||
// 1. Clean them up.
|
|
||||||
// 2. Indicate whether we are dealing with any exception rules.
|
|
||||||
// 3. Error if we see a single exclusion marker on it's own (!).
|
|
||||||
cleanedPatterns := []string{}
|
|
||||||
patternDirs := [][]string{}
|
|
||||||
exceptions := false
|
|
||||||
for _, pattern := range patterns {
|
|
||||||
// Eliminate leading and trailing whitespace.
|
// Eliminate leading and trailing whitespace.
|
||||||
pattern = strings.TrimSpace(pattern)
|
p = strings.TrimSpace(p)
|
||||||
if empty(pattern) {
|
if p == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if exclusion(pattern) {
|
p = filepath.Clean(p)
|
||||||
if len(pattern) == 1 {
|
newp := &Pattern{}
|
||||||
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
|
if p[0] == '!' {
|
||||||
|
if len(p) == 1 {
|
||||||
|
return nil, errors.New("illegal exclusion pattern: \"!\"")
|
||||||
}
|
}
|
||||||
exceptions = true
|
newp.exclusion = true
|
||||||
|
p = p[1:]
|
||||||
|
pm.exclusions = true
|
||||||
}
|
}
|
||||||
pattern = filepath.Clean(pattern)
|
// Do some syntax checking on the pattern.
|
||||||
cleanedPatterns = append(cleanedPatterns, pattern)
|
// filepath's Match() has some really weird rules that are inconsistent
|
||||||
if exclusion(pattern) {
|
// so instead of trying to dup their logic, just call Match() for its
|
||||||
pattern = pattern[1:]
|
// error state and if there is an error in the pattern return it.
|
||||||
|
// If this becomes an issue we can remove this since its really only
|
||||||
|
// needed in the error (syntax) case - which isn't really critical.
|
||||||
|
if _, err := filepath.Match(p, "."); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
|
newp.cleanedPattern = p
|
||||||
|
newp.dirs = strings.Split(p, string(os.PathSeparator))
|
||||||
|
pm.patterns = append(pm.patterns, newp)
|
||||||
|
}
|
||||||
|
return pm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return cleanedPatterns, patternDirs, exceptions, nil
|
// Matches matches path against all the patterns. Matches is not safe to be
|
||||||
}
|
// called concurrently
|
||||||
|
func (pm *PatternMatcher) Matches(file string) (bool, error) {
|
||||||
// Matches returns true if file matches any of the patterns
|
|
||||||
// and isn't excluded by any of the subsequent patterns.
|
|
||||||
func Matches(file string, patterns []string) (bool, error) {
|
|
||||||
file = filepath.Clean(file)
|
|
||||||
|
|
||||||
if file == "." {
|
|
||||||
// Don't let them exclude everything, kind of silly.
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
patterns, patDirs, _, err := CleanPatterns(patterns)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return OptimizedMatches(file, patterns, patDirs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
|
|
||||||
// It will assume that the inputs have been preprocessed and therefore the function
|
|
||||||
// doesn't need to do as much error checking and clean-up. This was done to avoid
|
|
||||||
// repeating these steps on each file being checked during the archive process.
|
|
||||||
// The more generic fileutils.Matches() can't make these assumptions.
|
|
||||||
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
|
|
||||||
matched := false
|
matched := false
|
||||||
file = filepath.FromSlash(file)
|
file = filepath.FromSlash(file)
|
||||||
parentPath := filepath.Dir(file)
|
parentPath := filepath.Dir(file)
|
||||||
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||||
|
|
||||||
for i, pattern := range patterns {
|
for _, pattern := range pm.patterns {
|
||||||
negative := false
|
negative := false
|
||||||
|
|
||||||
if exclusion(pattern) {
|
if pattern.exclusion {
|
||||||
negative = true
|
negative = true
|
||||||
pattern = pattern[1:]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
match, err := regexpMatch(pattern, file)
|
match, err := pattern.match(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !match && parentPath != "." {
|
if !match && parentPath != "." {
|
||||||
// Check to see if the pattern matches one of our parent dirs.
|
// Check to see if the pattern matches one of our parent dirs.
|
||||||
if len(patDirs[i]) <= len(parentPathDirs) {
|
if len(pattern.dirs) <= len(parentPathDirs) {
|
||||||
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
|
match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
|
||||||
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -120,28 +96,49 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
|
||||||
return matched, nil
|
return matched, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// regexpMatch tries to match the logic of filepath.Match but
|
// Exclusions returns true if any of the patterns define exclusions
|
||||||
// does so using regexp logic. We do this so that we can expand the
|
func (pm *PatternMatcher) Exclusions() bool {
|
||||||
// wildcard set to include other things, like "**" to mean any number
|
return pm.exclusions
|
||||||
// of directories. This means that we should be backwards compatible
|
|
||||||
// with filepath.Match(). We'll end up supporting more stuff, due to
|
|
||||||
// the fact that we're using regexp, but that's ok - it does no harm.
|
|
||||||
//
|
|
||||||
// As per the comment in golangs filepath.Match, on Windows, escaping
|
|
||||||
// is disabled. Instead, '\\' is treated as path separator.
|
|
||||||
func regexpMatch(pattern, path string) (bool, error) {
|
|
||||||
regStr := "^"
|
|
||||||
|
|
||||||
// Do some syntax checking on the pattern.
|
|
||||||
// filepath's Match() has some really weird rules that are inconsistent
|
|
||||||
// so instead of trying to dup their logic, just call Match() for its
|
|
||||||
// error state and if there is an error in the pattern return it.
|
|
||||||
// If this becomes an issue we can remove this since its really only
|
|
||||||
// needed in the error (syntax) case - which isn't really critical.
|
|
||||||
if _, err := filepath.Match(pattern, path); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Patterns returns array of active patterns
|
||||||
|
func (pm *PatternMatcher) Patterns() []*Pattern {
|
||||||
|
return pm.patterns
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pattern defines a single regexp used used to filter file paths.
|
||||||
|
type Pattern struct {
|
||||||
|
cleanedPattern string
|
||||||
|
dirs []string
|
||||||
|
regexp *regexp.Regexp
|
||||||
|
exclusion bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pattern) String() string {
|
||||||
|
return p.cleanedPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exclusion returns true if this pattern defines exclusion
|
||||||
|
func (p *Pattern) Exclusion() bool {
|
||||||
|
return p.exclusion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pattern) match(path string) (bool, error) {
|
||||||
|
|
||||||
|
if p.regexp == nil {
|
||||||
|
if err := p.compile(); err != nil {
|
||||||
|
return false, filepath.ErrBadPattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b := p.regexp.MatchString(path)
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pattern) compile() error {
|
||||||
|
regStr := "^"
|
||||||
|
pattern := p.cleanedPattern
|
||||||
// Go through the pattern and convert it to a regexp.
|
// Go through the pattern and convert it to a regexp.
|
||||||
// We use a scanner so we can support utf-8 chars.
|
// We use a scanner so we can support utf-8 chars.
|
||||||
var scan scanner.Scanner
|
var scan scanner.Scanner
|
||||||
|
@ -161,17 +158,19 @@ func regexpMatch(pattern, path string) (bool, error) {
|
||||||
// is some flavor of "**"
|
// is some flavor of "**"
|
||||||
scan.Next()
|
scan.Next()
|
||||||
|
|
||||||
|
// Treat **/ as ** so eat the "/"
|
||||||
|
if string(scan.Peek()) == sl {
|
||||||
|
scan.Next()
|
||||||
|
}
|
||||||
|
|
||||||
if scan.Peek() == scanner.EOF {
|
if scan.Peek() == scanner.EOF {
|
||||||
// is "**EOF" - to align with .gitignore just accept all
|
// is "**EOF" - to align with .gitignore just accept all
|
||||||
regStr += ".*"
|
regStr += ".*"
|
||||||
} else {
|
} else {
|
||||||
// is "**"
|
// is "**"
|
||||||
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
|
// Note that this allows for any # of /'s (even 0) because
|
||||||
}
|
// the .* will eat everything, even /'s
|
||||||
|
regStr += "(.*" + escSL + ")?"
|
||||||
// Treat **/ as ** so eat the "/"
|
|
||||||
if string(scan.Peek()) == sl {
|
|
||||||
scan.Next()
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// is "*" so map it to anything but "/"
|
// is "*" so map it to anything but "/"
|
||||||
|
@ -180,7 +179,7 @@ func regexpMatch(pattern, path string) (bool, error) {
|
||||||
} else if ch == '?' {
|
} else if ch == '?' {
|
||||||
// "?" is any char except "/"
|
// "?" is any char except "/"
|
||||||
regStr += "[^" + escSL + "]"
|
regStr += "[^" + escSL + "]"
|
||||||
} else if strings.Index(".$", string(ch)) != -1 {
|
} else if ch == '.' || ch == '$' {
|
||||||
// Escape some regexp special chars that have no meaning
|
// Escape some regexp special chars that have no meaning
|
||||||
// in golang's filepath.Match
|
// in golang's filepath.Match
|
||||||
regStr += `\` + string(ch)
|
regStr += `\` + string(ch)
|
||||||
|
@ -206,14 +205,30 @@ func regexpMatch(pattern, path string) (bool, error) {
|
||||||
|
|
||||||
regStr += "$"
|
regStr += "$"
|
||||||
|
|
||||||
res, err := regexp.MatchString(regStr, path)
|
re, err := regexp.Compile(regStr)
|
||||||
|
|
||||||
// Map regexp's error to filepath's so no one knows we're not using filepath
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = filepath.ErrBadPattern
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return res, err
|
p.regexp = re
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches returns true if file matches any of the patterns
|
||||||
|
// and isn't excluded by any of the subsequent patterns.
|
||||||
|
func Matches(file string, patterns []string) (bool, error) {
|
||||||
|
pm, err := NewPatternMatcher(patterns)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
file = filepath.Clean(file)
|
||||||
|
|
||||||
|
if file == "." {
|
||||||
|
// Don't let them exclude everything, kind of silly.
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return pm.Matches(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyFile copies from src to dst until either EOF is reached
|
// CopyFile copies from src to dst until either EOF is reached
|
||||||
|
|
23
vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
generated
vendored
Normal file
23
vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package homedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/idtools"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetStatic returns the home directory for the current user without calling
|
||||||
|
// os/user.Current(). This is useful for static-linked binary on glibc-based
|
||||||
|
// system, because a call to os/user.Current() in a static binary leads to
|
||||||
|
// segfault due to a glibc issue that won't be fixed in a short term.
|
||||||
|
// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
|
||||||
|
func GetStatic() (string, error) {
|
||||||
|
uid := os.Getuid()
|
||||||
|
usr, err := idtools.LookupUID(uid)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return usr.Home, nil
|
||||||
|
}
|
13
vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
generated
vendored
Normal file
13
vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package homedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetStatic is not needed for non-linux systems.
|
||||||
|
// (Precisely, it is needed only for glibc-based linux systems.)
|
||||||
|
func GetStatic() (string, error) {
|
||||||
|
return "", errors.New("homedir.GetStatic() is not supported on this system")
|
||||||
|
}
|
|
@ -1,8 +1,9 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
package homedir
|
package homedir
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/opencontainers/runc/libcontainer/user"
|
"github.com/opencontainers/runc/libcontainer/user"
|
||||||
)
|
)
|
||||||
|
@ -10,9 +11,6 @@ import (
|
||||||
// Key returns the env var name for the user's home dir based on
|
// Key returns the env var name for the user's home dir based on
|
||||||
// the platform being run on
|
// the platform being run on
|
||||||
func Key() string {
|
func Key() string {
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return "USERPROFILE"
|
|
||||||
}
|
|
||||||
return "HOME"
|
return "HOME"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -21,7 +19,7 @@ func Key() string {
|
||||||
// Returned path should be used with "path/filepath" to form new paths.
|
// Returned path should be used with "path/filepath" to form new paths.
|
||||||
func Get() string {
|
func Get() string {
|
||||||
home := os.Getenv(Key())
|
home := os.Getenv(Key())
|
||||||
if home == "" && runtime.GOOS != "windows" {
|
if home == "" {
|
||||||
if u, err := user.CurrentUser(); err == nil {
|
if u, err := user.CurrentUser(); err == nil {
|
||||||
return u.Home
|
return u.Home
|
||||||
}
|
}
|
||||||
|
@ -32,8 +30,5 @@ func Get() string {
|
||||||
// GetShortcutString returns the string that is shortcut to user's home directory
|
// GetShortcutString returns the string that is shortcut to user's home directory
|
||||||
// in the native shell of the platform running on.
|
// in the native shell of the platform running on.
|
||||||
func GetShortcutString() string {
|
func GetShortcutString() string {
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return "%USERPROFILE%" // be careful while using in format functions
|
|
||||||
}
|
|
||||||
return "~"
|
return "~"
|
||||||
}
|
}
|
24
vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
generated
vendored
Normal file
24
vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
package homedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key returns the env var name for the user's home dir based on
|
||||||
|
// the platform being run on
|
||||||
|
func Key() string {
|
||||||
|
return "USERPROFILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the home directory of the current user with the help of
|
||||||
|
// environment variables depending on the target operating system.
|
||||||
|
// Returned path should be used with "path/filepath" to form new paths.
|
||||||
|
func Get() string {
|
||||||
|
return os.Getenv(Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetShortcutString returns the string that is shortcut to user's home directory
|
||||||
|
// in the native shell of the platform running on.
|
||||||
|
func GetShortcutString() string {
|
||||||
|
return "%USERPROFILE%" // be careful while using in format functions
|
||||||
|
}
|
138
vendor/github.com/containers/storage/pkg/idtools/idtools.go
generated
vendored
138
vendor/github.com/containers/storage/pkg/idtools/idtools.go
generated
vendored
|
@ -37,49 +37,56 @@ const (
|
||||||
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
||||||
// ownership to the requested uid/gid. If the directory already exists, this
|
// ownership to the requested uid/gid. If the directory already exists, this
|
||||||
// function will still change ownership to the requested uid/gid pair.
|
// function will still change ownership to the requested uid/gid pair.
|
||||||
|
// Deprecated: Use MkdirAllAndChown
|
||||||
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
|
|
||||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
|
||||||
// directories along the path exist, no change of ownership will be performed
|
|
||||||
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
||||||
// If the directory already exists, this function still changes ownership
|
// If the directory already exists, this function still changes ownership
|
||||||
|
// Deprecated: Use MkdirAndChown with a IDPair
|
||||||
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership to the requested uid/gid. If the directory already exists, this
|
||||||
|
// function will still change ownership to the requested uid/gid pair.
|
||||||
|
func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
|
||||||
|
// If the directory already exists, this function still changes ownership
|
||||||
|
func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||||
|
// directories along the path exist, no change of ownership will be performed
|
||||||
|
func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
|
||||||
|
}
|
||||||
|
|
||||||
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
||||||
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
||||||
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
||||||
var uid, gid int
|
uid, err := toHost(0, uidMap)
|
||||||
|
|
||||||
if uidMap != nil {
|
|
||||||
xUID, err := ToHost(0, uidMap)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return -1, -1, err
|
return -1, -1, err
|
||||||
}
|
}
|
||||||
uid = xUID
|
gid, err := toHost(0, gidMap)
|
||||||
}
|
|
||||||
if gidMap != nil {
|
|
||||||
xGID, err := ToHost(0, gidMap)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return -1, -1, err
|
return -1, -1, err
|
||||||
}
|
}
|
||||||
gid = xGID
|
|
||||||
}
|
|
||||||
return uid, gid, nil
|
return uid, gid, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToContainer takes an id mapping, and uses it to translate a
|
// toContainer takes an id mapping, and uses it to translate a
|
||||||
// host ID to the remapped ID. If no map is provided, then the translation
|
// host ID to the remapped ID. If no map is provided, then the translation
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id
|
// assumes a 1-to-1 mapping and returns the passed in id
|
||||||
func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
func toContainer(hostID int, idMap []IDMap) (int, error) {
|
||||||
if idMap == nil {
|
if idMap == nil {
|
||||||
return hostID, nil
|
return hostID, nil
|
||||||
}
|
}
|
||||||
|
@ -92,10 +99,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
||||||
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToHost takes an id mapping and a remapped ID, and translates the
|
// toHost takes an id mapping and a remapped ID, and translates the
|
||||||
// ID to the mapped host ID. If no map is provided, then the translation
|
// ID to the mapped host ID. If no map is provided, then the translation
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id #
|
// assumes a 1-to-1 mapping and returns the passed in id #
|
||||||
func ToHost(contID int, idMap []IDMap) (int, error) {
|
func toHost(contID int, idMap []IDMap) (int, error) {
|
||||||
if idMap == nil {
|
if idMap == nil {
|
||||||
return contID, nil
|
return contID, nil
|
||||||
}
|
}
|
||||||
|
@ -108,26 +115,101 @@ func ToHost(contID int, idMap []IDMap) (int, error) {
|
||||||
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateIDMappings takes a requested user and group name and
|
// IDPair is a UID and GID pair
|
||||||
|
type IDPair struct {
|
||||||
|
UID int
|
||||||
|
GID int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDMappings contains a mappings of UIDs and GIDs
|
||||||
|
type IDMappings struct {
|
||||||
|
uids []IDMap
|
||||||
|
gids []IDMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIDMappings takes a requested user and group name and
|
||||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||||
// proper uid and gid remapping ranges for that user/group pair
|
// proper uid and gid remapping ranges for that user/group pair
|
||||||
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
|
func NewIDMappings(username, groupname string) (*IDMappings, error) {
|
||||||
subuidRanges, err := parseSubuid(username)
|
subuidRanges, err := parseSubuid(username)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
subgidRanges, err := parseSubgid(groupname)
|
subgidRanges, err := parseSubgid(groupname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(subuidRanges) == 0 {
|
if len(subuidRanges) == 0 {
|
||||||
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
return nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||||
}
|
}
|
||||||
if len(subgidRanges) == 0 {
|
if len(subgidRanges) == 0 {
|
||||||
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||||
}
|
}
|
||||||
|
|
||||||
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
|
return &IDMappings{
|
||||||
|
uids: createIDMap(subuidRanges),
|
||||||
|
gids: createIDMap(subgidRanges),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIDMappingsFromMaps creates a new mapping from two slices
|
||||||
|
// Deprecated: this is a temporary shim while transitioning to IDMapping
|
||||||
|
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
|
||||||
|
return &IDMappings{uids: uids, gids: gids}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RootPair returns a uid and gid pair for the root user. The error is ignored
|
||||||
|
// because a root user always exists, and the defaults are correct when the uid
|
||||||
|
// and gid maps are empty.
|
||||||
|
func (i *IDMappings) RootPair() IDPair {
|
||||||
|
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
|
||||||
|
return IDPair{UID: uid, GID: gid}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToHost returns the host UID and GID for the container uid, gid.
|
||||||
|
// Remapping is only performed if the ids aren't already the remapped root ids
|
||||||
|
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
|
||||||
|
var err error
|
||||||
|
target := i.RootPair()
|
||||||
|
|
||||||
|
if pair.UID != target.UID {
|
||||||
|
target.UID, err = toHost(pair.UID, i.uids)
|
||||||
|
if err != nil {
|
||||||
|
return target, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pair.GID != target.GID {
|
||||||
|
target.GID, err = toHost(pair.GID, i.gids)
|
||||||
|
}
|
||||||
|
return target, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToContainer returns the container UID and GID for the host uid and gid
|
||||||
|
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
|
||||||
|
uid, err := toContainer(pair.UID, i.uids)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
gid, err := toContainer(pair.GID, i.gids)
|
||||||
|
return uid, gid, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty returns true if there are no id mappings
|
||||||
|
func (i *IDMappings) Empty() bool {
|
||||||
|
return len(i.uids) == 0 && len(i.gids) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// UIDs return the UID mapping
|
||||||
|
// TODO: remove this once everything has been refactored to use pairs
|
||||||
|
func (i *IDMappings) UIDs() []IDMap {
|
||||||
|
return i.uids
|
||||||
|
}
|
||||||
|
|
||||||
|
// GIDs return the UID mapping
|
||||||
|
// TODO: remove this once everything has been refactored to use pairs
|
||||||
|
func (i *IDMappings) GIDs() []IDMap {
|
||||||
|
return i.gids
|
||||||
}
|
}
|
||||||
|
|
||||||
func createIDMap(subidRanges ranges) []IDMap {
|
func createIDMap(subidRanges ranges) []IDMap {
|
||||||
|
|
154
vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
generated
vendored
154
vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
generated
vendored
|
@ -3,10 +3,21 @@
|
||||||
package idtools
|
package idtools
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
"github.com/opencontainers/runc/libcontainer/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
entOnce sync.Once
|
||||||
|
getentCmd string
|
||||||
)
|
)
|
||||||
|
|
||||||
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
||||||
|
@ -18,11 +29,8 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
||||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||||
paths = []string{path}
|
paths = []string{path}
|
||||||
} else if err == nil && chownExisting {
|
} else if err == nil && chownExisting {
|
||||||
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// short-circuit--we were called with an existing directory and chown was requested
|
// short-circuit--we were called with an existing directory and chown was requested
|
||||||
return nil
|
return os.Chown(path, ownerUID, ownerGID)
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
// nothing to do; directory path fully exists already and chown was NOT requested
|
// nothing to do; directory path fully exists already and chown was NOT requested
|
||||||
return nil
|
return nil
|
||||||
|
@ -41,7 +49,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
||||||
paths = append(paths, dirPath)
|
paths = append(paths, dirPath)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -58,3 +66,139 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
||||||
|
// if that uid, gid pair has access (execute bit) to the directory
|
||||||
|
func CanAccess(path string, pair IDPair) bool {
|
||||||
|
statInfo, err := system.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
fileMode := os.FileMode(statInfo.Mode())
|
||||||
|
permBits := fileMode.Perm()
|
||||||
|
return accessible(statInfo.UID() == uint32(pair.UID),
|
||||||
|
statInfo.GID() == uint32(pair.GID), permBits)
|
||||||
|
}
|
||||||
|
|
||||||
|
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
|
||||||
|
if isOwner && (perms&0100 == 0100) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if isGroup && (perms&0010 == 0010) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if perms&0001 == 0001 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
|
||||||
|
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||||
|
func LookupUser(username string) (user.User, error) {
|
||||||
|
// first try a local system files lookup using existing capabilities
|
||||||
|
usr, err := user.LookupUser(username)
|
||||||
|
if err == nil {
|
||||||
|
return usr, nil
|
||||||
|
}
|
||||||
|
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||||
|
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
|
||||||
|
if err != nil {
|
||||||
|
return user.User{}, err
|
||||||
|
}
|
||||||
|
return usr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
|
||||||
|
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||||
|
func LookupUID(uid int) (user.User, error) {
|
||||||
|
// first try a local system files lookup using existing capabilities
|
||||||
|
usr, err := user.LookupUid(uid)
|
||||||
|
if err == nil {
|
||||||
|
return usr, nil
|
||||||
|
}
|
||||||
|
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
||||||
|
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
|
||||||
|
}
|
||||||
|
|
||||||
|
func getentUser(args string) (user.User, error) {
|
||||||
|
reader, err := callGetent(args)
|
||||||
|
if err != nil {
|
||||||
|
return user.User{}, err
|
||||||
|
}
|
||||||
|
users, err := user.ParsePasswd(reader)
|
||||||
|
if err != nil {
|
||||||
|
return user.User{}, err
|
||||||
|
}
|
||||||
|
if len(users) == 0 {
|
||||||
|
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
|
||||||
|
}
|
||||||
|
return users[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
|
||||||
|
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||||
|
func LookupGroup(groupname string) (user.Group, error) {
|
||||||
|
// first try a local system files lookup using existing capabilities
|
||||||
|
group, err := user.LookupGroup(groupname)
|
||||||
|
if err == nil {
|
||||||
|
return group, nil
|
||||||
|
}
|
||||||
|
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||||
|
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
|
||||||
|
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
||||||
|
func LookupGID(gid int) (user.Group, error) {
|
||||||
|
// first try a local system files lookup using existing capabilities
|
||||||
|
group, err := user.LookupGid(gid)
|
||||||
|
if err == nil {
|
||||||
|
return group, nil
|
||||||
|
}
|
||||||
|
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
||||||
|
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
|
||||||
|
}
|
||||||
|
|
||||||
|
func getentGroup(args string) (user.Group, error) {
|
||||||
|
reader, err := callGetent(args)
|
||||||
|
if err != nil {
|
||||||
|
return user.Group{}, err
|
||||||
|
}
|
||||||
|
groups, err := user.ParseGroup(reader)
|
||||||
|
if err != nil {
|
||||||
|
return user.Group{}, err
|
||||||
|
}
|
||||||
|
if len(groups) == 0 {
|
||||||
|
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
|
||||||
|
}
|
||||||
|
return groups[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func callGetent(args string) (io.Reader, error) {
|
||||||
|
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
|
||||||
|
// if no `getent` command on host, can't do anything else
|
||||||
|
if getentCmd == "" {
|
||||||
|
return nil, fmt.Errorf("")
|
||||||
|
}
|
||||||
|
out, err := execCmd(getentCmd, args)
|
||||||
|
if err != nil {
|
||||||
|
exitCode, errC := system.GetExitCode(err)
|
||||||
|
if errC != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch exitCode {
|
||||||
|
case 1:
|
||||||
|
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
|
||||||
|
case 2:
|
||||||
|
terms := strings.Split(args, " ")
|
||||||
|
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
|
||||||
|
case 3:
|
||||||
|
return nil, fmt.Errorf("getent database doesn't support enumeration")
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return bytes.NewReader(out), nil
|
||||||
|
}
|
||||||
|
|
9
vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
generated
vendored
9
vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
generated
vendored
|
@ -11,8 +11,15 @@ import (
|
||||||
// Platforms such as Windows do not support the UID/GID concept. So make this
|
// Platforms such as Windows do not support the UID/GID concept. So make this
|
||||||
// just a wrapper around system.MkdirAll.
|
// just a wrapper around system.MkdirAll.
|
||||||
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
||||||
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
||||||
|
// if that uid, gid pair has access (execute bit) to the directory
|
||||||
|
// Windows does not require/support this function, so always return true
|
||||||
|
func CanAccess(path string, pair IDPair) bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
24
vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
generated
vendored
24
vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
generated
vendored
|
@ -2,8 +2,6 @@ package idtools
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -33,23 +31,6 @@ var (
|
||||||
userMod = "usermod"
|
userMod = "usermod"
|
||||||
)
|
)
|
||||||
|
|
||||||
func resolveBinary(binname string) (string, error) {
|
|
||||||
binaryPath, err := exec.LookPath(binname)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
//only return no error if the final resolved binary basename
|
|
||||||
//matches what was searched for
|
|
||||||
if filepath.Base(resolvedPath) == binname {
|
|
||||||
return resolvedPath, nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNamespaceRangesUser takes a username and uses the standard system
|
// AddNamespaceRangesUser takes a username and uses the standard system
|
||||||
// utility to create a system user/group pair used to hold the
|
// utility to create a system user/group pair used to hold the
|
||||||
// /etc/sub{uid,gid} ranges which will be used for user namespace
|
// /etc/sub{uid,gid} ranges which will be used for user namespace
|
||||||
|
@ -181,8 +162,3 @@ func wouldOverlap(arange subIDRange, ID int) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func execCmd(cmd, args string) ([]byte, error) {
|
|
||||||
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
|
|
||||||
return execCmd.CombinedOutput()
|
|
||||||
}
|
|
||||||
|
|
32
vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
generated
vendored
Normal file
32
vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resolveBinary(binname string) (string, error) {
|
||||||
|
binaryPath, err := exec.LookPath(binname)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
//only return no error if the final resolved binary basename
|
||||||
|
//matches what was searched for
|
||||||
|
if filepath.Base(resolvedPath) == binname {
|
||||||
|
return resolvedPath, nil
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func execCmd(cmd, args string) ([]byte, error) {
|
||||||
|
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
|
||||||
|
return execCmd.CombinedOutput()
|
||||||
|
}
|
186
vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
generated
vendored
Normal file
186
vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
generated
vendored
Normal file
|
@ -0,0 +1,186 @@
|
||||||
|
package ioutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// maxCap is the highest capacity to use in byte slices that buffer data.
|
||||||
|
const maxCap = 1e6
|
||||||
|
|
||||||
|
// minCap is the lowest capacity to use in byte slices that buffer data
|
||||||
|
const minCap = 64
|
||||||
|
|
||||||
|
// blockThreshold is the minimum number of bytes in the buffer which will cause
|
||||||
|
// a write to BytesPipe to block when allocating a new slice.
|
||||||
|
const blockThreshold = 1e6
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrClosed is returned when Write is called on a closed BytesPipe.
|
||||||
|
ErrClosed = errors.New("write to closed BytesPipe")
|
||||||
|
|
||||||
|
bufPools = make(map[int]*sync.Pool)
|
||||||
|
bufPoolsLock sync.Mutex
|
||||||
|
)
|
||||||
|
|
||||||
|
// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
|
||||||
|
// All written data may be read at most once. Also, BytesPipe allocates
|
||||||
|
// and releases new byte slices to adjust to current needs, so the buffer
|
||||||
|
// won't be overgrown after peak loads.
|
||||||
|
type BytesPipe struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
wait *sync.Cond
|
||||||
|
buf []*fixedBuffer
|
||||||
|
bufLen int
|
||||||
|
closeErr error // error to return from next Read. set to nil if not closed.
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
|
||||||
|
// If buf is nil, then it will be initialized with slice which cap is 64.
|
||||||
|
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
|
||||||
|
func NewBytesPipe() *BytesPipe {
|
||||||
|
bp := &BytesPipe{}
|
||||||
|
bp.buf = append(bp.buf, getBuffer(minCap))
|
||||||
|
bp.wait = sync.NewCond(&bp.mu)
|
||||||
|
return bp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes p to BytesPipe.
|
||||||
|
// It can allocate new []byte slices in a process of writing.
|
||||||
|
func (bp *BytesPipe) Write(p []byte) (int, error) {
|
||||||
|
bp.mu.Lock()
|
||||||
|
|
||||||
|
written := 0
|
||||||
|
loop0:
|
||||||
|
for {
|
||||||
|
if bp.closeErr != nil {
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return written, ErrClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(bp.buf) == 0 {
|
||||||
|
bp.buf = append(bp.buf, getBuffer(64))
|
||||||
|
}
|
||||||
|
// get the last buffer
|
||||||
|
b := bp.buf[len(bp.buf)-1]
|
||||||
|
|
||||||
|
n, err := b.Write(p)
|
||||||
|
written += n
|
||||||
|
bp.bufLen += n
|
||||||
|
|
||||||
|
// errBufferFull is an error we expect to get if the buffer is full
|
||||||
|
if err != nil && err != errBufferFull {
|
||||||
|
bp.wait.Broadcast()
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// if there was enough room to write all then break
|
||||||
|
if len(p) == n {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// more data: write to the next slice
|
||||||
|
p = p[n:]
|
||||||
|
|
||||||
|
// make sure the buffer doesn't grow too big from this write
|
||||||
|
for bp.bufLen >= blockThreshold {
|
||||||
|
bp.wait.Wait()
|
||||||
|
if bp.closeErr != nil {
|
||||||
|
continue loop0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// add new byte slice to the buffers slice and continue writing
|
||||||
|
nextCap := b.Cap() * 2
|
||||||
|
if nextCap > maxCap {
|
||||||
|
nextCap = maxCap
|
||||||
|
}
|
||||||
|
bp.buf = append(bp.buf, getBuffer(nextCap))
|
||||||
|
}
|
||||||
|
bp.wait.Broadcast()
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseWithError causes further reads from a BytesPipe to return immediately.
|
||||||
|
func (bp *BytesPipe) CloseWithError(err error) error {
|
||||||
|
bp.mu.Lock()
|
||||||
|
if err != nil {
|
||||||
|
bp.closeErr = err
|
||||||
|
} else {
|
||||||
|
bp.closeErr = io.EOF
|
||||||
|
}
|
||||||
|
bp.wait.Broadcast()
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close causes further reads from a BytesPipe to return immediately.
|
||||||
|
func (bp *BytesPipe) Close() error {
|
||||||
|
return bp.CloseWithError(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads bytes from BytesPipe.
|
||||||
|
// Data could be read only once.
|
||||||
|
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
|
||||||
|
bp.mu.Lock()
|
||||||
|
if bp.bufLen == 0 {
|
||||||
|
if bp.closeErr != nil {
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return 0, bp.closeErr
|
||||||
|
}
|
||||||
|
bp.wait.Wait()
|
||||||
|
if bp.bufLen == 0 && bp.closeErr != nil {
|
||||||
|
err := bp.closeErr
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for bp.bufLen > 0 {
|
||||||
|
b := bp.buf[0]
|
||||||
|
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
|
||||||
|
n += read
|
||||||
|
bp.bufLen -= read
|
||||||
|
|
||||||
|
if b.Len() == 0 {
|
||||||
|
// it's empty so return it to the pool and move to the next one
|
||||||
|
returnBuffer(b)
|
||||||
|
bp.buf[0] = nil
|
||||||
|
bp.buf = bp.buf[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(p) == read {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
p = p[read:]
|
||||||
|
}
|
||||||
|
|
||||||
|
bp.wait.Broadcast()
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func returnBuffer(b *fixedBuffer) {
|
||||||
|
b.Reset()
|
||||||
|
bufPoolsLock.Lock()
|
||||||
|
pool := bufPools[b.Cap()]
|
||||||
|
bufPoolsLock.Unlock()
|
||||||
|
if pool != nil {
|
||||||
|
pool.Put(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBuffer(size int) *fixedBuffer {
|
||||||
|
bufPoolsLock.Lock()
|
||||||
|
pool, ok := bufPools[size]
|
||||||
|
if !ok {
|
||||||
|
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
|
||||||
|
bufPools[size] = pool
|
||||||
|
}
|
||||||
|
bufPoolsLock.Unlock()
|
||||||
|
return pool.Get().(*fixedBuffer)
|
||||||
|
}
|
22
vendor/github.com/containers/storage/pkg/ioutils/fmt.go
generated
vendored
22
vendor/github.com/containers/storage/pkg/ioutils/fmt.go
generated
vendored
|
@ -1,22 +0,0 @@
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FprintfIfNotEmpty prints the string value if it's not empty
|
|
||||||
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
|
|
||||||
if value != "" {
|
|
||||||
return fmt.Fprintf(w, format, value)
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FprintfIfTrue prints the boolean value if it's true
|
|
||||||
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
|
|
||||||
if ok {
|
|
||||||
return fmt.Fprintf(w, format, ok)
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
80
vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
generated
vendored
80
vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
generated
vendored
|
@ -80,3 +80,83 @@ func (w *atomicFileWriter) Close() (retErr error) {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AtomicWriteSet is used to atomically write a set
|
||||||
|
// of files and ensure they are visible at the same time.
|
||||||
|
// Must be committed to a new directory.
|
||||||
|
type AtomicWriteSet struct {
|
||||||
|
root string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAtomicWriteSet creates a new atomic write set to
|
||||||
|
// atomically create a set of files. The given directory
|
||||||
|
// is used as the base directory for storing files before
|
||||||
|
// commit. If no temporary directory is given the system
|
||||||
|
// default is used.
|
||||||
|
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
|
||||||
|
td, err := ioutil.TempDir(tmpDir, "write-set-")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &AtomicWriteSet{
|
||||||
|
root: td,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteFile writes a file to the set, guaranteeing the file
|
||||||
|
// has been synced.
|
||||||
|
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
|
||||||
|
f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n, err := f.Write(data)
|
||||||
|
if err == nil && n < len(data) {
|
||||||
|
err = io.ErrShortWrite
|
||||||
|
}
|
||||||
|
if err1 := f.Close(); err == nil {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type syncFileCloser struct {
|
||||||
|
*os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w syncFileCloser) Close() error {
|
||||||
|
err := w.File.Sync()
|
||||||
|
if err1 := w.File.Close(); err == nil {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileWriter opens a file writer inside the set. The file
|
||||||
|
// should be synced and closed before calling commit.
|
||||||
|
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
|
||||||
|
f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return syncFileCloser{f}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel cancels the set and removes all temporary data
|
||||||
|
// created in the set.
|
||||||
|
func (ws *AtomicWriteSet) Cancel() error {
|
||||||
|
return os.RemoveAll(ws.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit moves all created files to the target directory. The
|
||||||
|
// target directory must not exist and the parent of the target
|
||||||
|
// directory must exist.
|
||||||
|
func (ws *AtomicWriteSet) Commit(target string) error {
|
||||||
|
return os.Rename(ws.root, target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the location the set is writing to.
|
||||||
|
func (ws *AtomicWriteSet) String() string {
|
||||||
|
return ws.root
|
||||||
|
}
|
||||||
|
|
226
vendor/github.com/containers/storage/pkg/ioutils/multireader.go
generated
vendored
226
vendor/github.com/containers/storage/pkg/ioutils/multireader.go
generated
vendored
|
@ -1,226 +0,0 @@
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
type pos struct {
|
|
||||||
idx int
|
|
||||||
offset int64
|
|
||||||
}
|
|
||||||
|
|
||||||
type multiReadSeeker struct {
|
|
||||||
readers []io.ReadSeeker
|
|
||||||
pos *pos
|
|
||||||
posIdx map[io.ReadSeeker]int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
var tmpOffset int64
|
|
||||||
switch whence {
|
|
||||||
case os.SEEK_SET:
|
|
||||||
for i, rdr := range r.readers {
|
|
||||||
// get size of the current reader
|
|
||||||
s, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if offset > tmpOffset+s {
|
|
||||||
if i == len(r.readers)-1 {
|
|
||||||
rdrOffset := s + (offset - tmpOffset)
|
|
||||||
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
r.pos = &pos{i, rdrOffset}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpOffset += s
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
rdrOffset := offset - tmpOffset
|
|
||||||
idx := i
|
|
||||||
|
|
||||||
rdr.Seek(rdrOffset, os.SEEK_SET)
|
|
||||||
// make sure all following readers are at 0
|
|
||||||
for _, rdr := range r.readers[i+1:] {
|
|
||||||
rdr.Seek(0, os.SEEK_SET)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rdrOffset == s && i != len(r.readers)-1 {
|
|
||||||
idx++
|
|
||||||
rdrOffset = 0
|
|
||||||
}
|
|
||||||
r.pos = &pos{idx, rdrOffset}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
case os.SEEK_END:
|
|
||||||
for _, rdr := range r.readers {
|
|
||||||
s, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
tmpOffset += s
|
|
||||||
}
|
|
||||||
r.Seek(tmpOffset+offset, os.SEEK_SET)
|
|
||||||
return tmpOffset + offset, nil
|
|
||||||
case os.SEEK_CUR:
|
|
||||||
if r.pos == nil {
|
|
||||||
return r.Seek(offset, os.SEEK_SET)
|
|
||||||
}
|
|
||||||
// Just return the current offset
|
|
||||||
if offset == 0 {
|
|
||||||
return r.getCurOffset()
|
|
||||||
}
|
|
||||||
|
|
||||||
curOffset, err := r.getCurOffset()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.pos = &pos{r.posIdx[rdr], rdrOffset}
|
|
||||||
return curOffset + offset, nil
|
|
||||||
default:
|
|
||||||
return -1, fmt.Errorf("Invalid whence: %d", whence)
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
|
|
||||||
var rdr io.ReadSeeker
|
|
||||||
var rdrOffset int64
|
|
||||||
|
|
||||||
for i, rdr := range r.readers {
|
|
||||||
offsetTo, err := r.getOffsetToReader(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, -1, err
|
|
||||||
}
|
|
||||||
if offsetTo > offset {
|
|
||||||
rdr = r.readers[i-1]
|
|
||||||
rdrOffset = offsetTo - offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if rdr == r.readers[len(r.readers)-1] {
|
|
||||||
rdrOffset = offsetTo + offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rdr, rdrOffset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getCurOffset() (int64, error) {
|
|
||||||
var totalSize int64
|
|
||||||
for _, rdr := range r.readers[:r.pos.idx+1] {
|
|
||||||
if r.posIdx[rdr] == r.pos.idx {
|
|
||||||
totalSize += r.pos.offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
size, err := getReadSeekerSize(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return -1, fmt.Errorf("error getting seeker size: %v", err)
|
|
||||||
}
|
|
||||||
totalSize += size
|
|
||||||
}
|
|
||||||
return totalSize, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
|
|
||||||
var offset int64
|
|
||||||
for _, r := range r.readers {
|
|
||||||
if r == rdr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
size, err := getReadSeekerSize(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
offset += size
|
|
||||||
}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) Read(b []byte) (int, error) {
|
|
||||||
if r.pos == nil {
|
|
||||||
r.pos = &pos{0, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
bCap := int64(cap(b))
|
|
||||||
buf := bytes.NewBuffer(nil)
|
|
||||||
var rdr io.ReadSeeker
|
|
||||||
|
|
||||||
for _, rdr = range r.readers[r.pos.idx:] {
|
|
||||||
readBytes, err := io.CopyN(buf, rdr, bCap)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
bCap -= readBytes
|
|
||||||
|
|
||||||
if bCap == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
r.pos = &pos{r.posIdx[rdr], rdrPos}
|
|
||||||
return buf.Read(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
|
|
||||||
// save the current position
|
|
||||||
pos, err := rdr.Seek(0, os.SEEK_CUR)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// get the size
|
|
||||||
size, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset the position
|
|
||||||
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
return size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
|
|
||||||
// input readseekers. After calling this method the initial position is set to the
|
|
||||||
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
|
|
||||||
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
|
|
||||||
// Seek can be used over the sum of lengths of all readseekers.
|
|
||||||
//
|
|
||||||
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
|
|
||||||
// its ReadSeeker components. Also, users should make no assumption on the state
|
|
||||||
// of individual readseekers while the MultiReadSeeker is used.
|
|
||||||
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
|
|
||||||
if len(readers) == 1 {
|
|
||||||
return readers[0]
|
|
||||||
}
|
|
||||||
idx := make(map[io.ReadSeeker]int)
|
|
||||||
for i, rdr := range readers {
|
|
||||||
idx[rdr] = i
|
|
||||||
}
|
|
||||||
return &multiReadSeeker{
|
|
||||||
readers: readers,
|
|
||||||
posIdx: idx,
|
|
||||||
}
|
|
||||||
}
|
|
71
vendor/github.com/containers/storage/pkg/ioutils/readers.go
generated
vendored
71
vendor/github.com/containers/storage/pkg/ioutils/readers.go
generated
vendored
|
@ -4,6 +4,8 @@ import (
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
type readCloserWrapper struct {
|
type readCloserWrapper struct {
|
||||||
|
@ -81,3 +83,72 @@ func (r *OnEOFReader) runFunc() {
|
||||||
r.Fn = nil
|
r.Fn = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
|
||||||
|
// operations.
|
||||||
|
type cancelReadCloser struct {
|
||||||
|
cancel func()
|
||||||
|
pR *io.PipeReader // Stream to read from
|
||||||
|
pW *io.PipeWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
|
||||||
|
// context is cancelled. The returned io.ReadCloser must be closed when it is
|
||||||
|
// no longer needed.
|
||||||
|
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
|
||||||
|
pR, pW := io.Pipe()
|
||||||
|
|
||||||
|
// Create a context used to signal when the pipe is closed
|
||||||
|
doneCtx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
p := &cancelReadCloser{
|
||||||
|
cancel: cancel,
|
||||||
|
pR: pR,
|
||||||
|
pW: pW,
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
_, err := io.Copy(pW, in)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// If the context was closed, p.closeWithError
|
||||||
|
// was already called. Calling it again would
|
||||||
|
// change the error that Read returns.
|
||||||
|
default:
|
||||||
|
p.closeWithError(err)
|
||||||
|
}
|
||||||
|
in.Close()
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
p.closeWithError(ctx.Err())
|
||||||
|
case <-doneCtx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read wraps the Read method of the pipe that provides data from the wrapped
|
||||||
|
// ReadCloser.
|
||||||
|
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
|
||||||
|
return p.pR.Read(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// closeWithError closes the wrapper and its underlying reader. It will
|
||||||
|
// cause future calls to Read to return err.
|
||||||
|
func (p *cancelReadCloser) closeWithError(err error) {
|
||||||
|
p.pW.CloseWithError(err)
|
||||||
|
p.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the wrapper its underlying reader. It will cause
|
||||||
|
// future calls to Read to return io.EOF.
|
||||||
|
func (p *cancelReadCloser) Close() error {
|
||||||
|
p.closeWithError(io.EOF)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
1
vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
generated
vendored
|
@ -45,4 +45,5 @@ const (
|
||||||
RELATIME = 0
|
RELATIME = 0
|
||||||
REMOUNT = 0
|
REMOUNT = 0
|
||||||
STRICTATIME = 0
|
STRICTATIME = 0
|
||||||
|
mntDetach = 0
|
||||||
)
|
)
|
||||||
|
|
48
vendor/github.com/containers/storage/pkg/mount/flags_linux.go
generated
vendored
48
vendor/github.com/containers/storage/pkg/mount/flags_linux.go
generated
vendored
|
@ -1,85 +1,87 @@
|
||||||
package mount
|
package mount
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// RDONLY will mount the file system read-only.
|
// RDONLY will mount the file system read-only.
|
||||||
RDONLY = syscall.MS_RDONLY
|
RDONLY = unix.MS_RDONLY
|
||||||
|
|
||||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||||
// take effect.
|
// take effect.
|
||||||
NOSUID = syscall.MS_NOSUID
|
NOSUID = unix.MS_NOSUID
|
||||||
|
|
||||||
// NODEV will not interpret character or block special devices on the file
|
// NODEV will not interpret character or block special devices on the file
|
||||||
// system.
|
// system.
|
||||||
NODEV = syscall.MS_NODEV
|
NODEV = unix.MS_NODEV
|
||||||
|
|
||||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||||
NOEXEC = syscall.MS_NOEXEC
|
NOEXEC = unix.MS_NOEXEC
|
||||||
|
|
||||||
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
|
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
|
||||||
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
|
SYNCHRONOUS = unix.MS_SYNCHRONOUS
|
||||||
|
|
||||||
// DIRSYNC will force all directory updates within the file system to be done
|
// DIRSYNC will force all directory updates within the file system to be done
|
||||||
// synchronously. This affects the following system calls: create, link,
|
// synchronously. This affects the following system calls: create, link,
|
||||||
// unlink, symlink, mkdir, rmdir, mknod and rename.
|
// unlink, symlink, mkdir, rmdir, mknod and rename.
|
||||||
DIRSYNC = syscall.MS_DIRSYNC
|
DIRSYNC = unix.MS_DIRSYNC
|
||||||
|
|
||||||
// REMOUNT will attempt to remount an already-mounted file system. This is
|
// REMOUNT will attempt to remount an already-mounted file system. This is
|
||||||
// commonly used to change the mount flags for a file system, especially to
|
// commonly used to change the mount flags for a file system, especially to
|
||||||
// make a readonly file system writeable. It does not change device or mount
|
// make a readonly file system writeable. It does not change device or mount
|
||||||
// point.
|
// point.
|
||||||
REMOUNT = syscall.MS_REMOUNT
|
REMOUNT = unix.MS_REMOUNT
|
||||||
|
|
||||||
// MANDLOCK will force mandatory locks on a filesystem.
|
// MANDLOCK will force mandatory locks on a filesystem.
|
||||||
MANDLOCK = syscall.MS_MANDLOCK
|
MANDLOCK = unix.MS_MANDLOCK
|
||||||
|
|
||||||
// NOATIME will not update the file access time when reading from a file.
|
// NOATIME will not update the file access time when reading from a file.
|
||||||
NOATIME = syscall.MS_NOATIME
|
NOATIME = unix.MS_NOATIME
|
||||||
|
|
||||||
// NODIRATIME will not update the directory access time.
|
// NODIRATIME will not update the directory access time.
|
||||||
NODIRATIME = syscall.MS_NODIRATIME
|
NODIRATIME = unix.MS_NODIRATIME
|
||||||
|
|
||||||
// BIND remounts a subtree somewhere else.
|
// BIND remounts a subtree somewhere else.
|
||||||
BIND = syscall.MS_BIND
|
BIND = unix.MS_BIND
|
||||||
|
|
||||||
// RBIND remounts a subtree and all possible submounts somewhere else.
|
// RBIND remounts a subtree and all possible submounts somewhere else.
|
||||||
RBIND = syscall.MS_BIND | syscall.MS_REC
|
RBIND = unix.MS_BIND | unix.MS_REC
|
||||||
|
|
||||||
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
|
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
|
||||||
UNBINDABLE = syscall.MS_UNBINDABLE
|
UNBINDABLE = unix.MS_UNBINDABLE
|
||||||
|
|
||||||
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
|
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
|
||||||
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
|
RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
|
||||||
|
|
||||||
// PRIVATE creates a mount which carries no propagation abilities.
|
// PRIVATE creates a mount which carries no propagation abilities.
|
||||||
PRIVATE = syscall.MS_PRIVATE
|
PRIVATE = unix.MS_PRIVATE
|
||||||
|
|
||||||
// RPRIVATE marks the entire mount tree as PRIVATE.
|
// RPRIVATE marks the entire mount tree as PRIVATE.
|
||||||
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
|
RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
|
||||||
|
|
||||||
// SLAVE creates a mount which receives propagation from its master, but not
|
// SLAVE creates a mount which receives propagation from its master, but not
|
||||||
// vice versa.
|
// vice versa.
|
||||||
SLAVE = syscall.MS_SLAVE
|
SLAVE = unix.MS_SLAVE
|
||||||
|
|
||||||
// RSLAVE marks the entire mount tree as SLAVE.
|
// RSLAVE marks the entire mount tree as SLAVE.
|
||||||
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
|
RSLAVE = unix.MS_SLAVE | unix.MS_REC
|
||||||
|
|
||||||
// SHARED creates a mount which provides the ability to create mirrors of
|
// SHARED creates a mount which provides the ability to create mirrors of
|
||||||
// that mount such that mounts and unmounts within any of the mirrors
|
// that mount such that mounts and unmounts within any of the mirrors
|
||||||
// propagate to the other mirrors.
|
// propagate to the other mirrors.
|
||||||
SHARED = syscall.MS_SHARED
|
SHARED = unix.MS_SHARED
|
||||||
|
|
||||||
// RSHARED marks the entire mount tree as SHARED.
|
// RSHARED marks the entire mount tree as SHARED.
|
||||||
RSHARED = syscall.MS_SHARED | syscall.MS_REC
|
RSHARED = unix.MS_SHARED | unix.MS_REC
|
||||||
|
|
||||||
// RELATIME updates inode access times relative to modify or change time.
|
// RELATIME updates inode access times relative to modify or change time.
|
||||||
RELATIME = syscall.MS_RELATIME
|
RELATIME = unix.MS_RELATIME
|
||||||
|
|
||||||
// STRICTATIME allows to explicitly request full atime updates. This makes
|
// STRICTATIME allows to explicitly request full atime updates. This makes
|
||||||
// it possible for the kernel to default to relatime or noatime but still
|
// it possible for the kernel to default to relatime or noatime but still
|
||||||
// allow userspace to override it.
|
// allow userspace to override it.
|
||||||
STRICTATIME = syscall.MS_STRICTATIME
|
STRICTATIME = unix.MS_STRICTATIME
|
||||||
|
|
||||||
|
mntDetach = unix.MNT_DETACH
|
||||||
)
|
)
|
||||||
|
|
1
vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
generated
vendored
|
@ -27,4 +27,5 @@ const (
|
||||||
STRICTATIME = 0
|
STRICTATIME = 0
|
||||||
SYNCHRONOUS = 0
|
SYNCHRONOUS = 0
|
||||||
RDONLY = 0
|
RDONLY = 0
|
||||||
|
mntDetach = 0
|
||||||
)
|
)
|
||||||
|
|
44
vendor/github.com/containers/storage/pkg/mount/mount.go
generated
vendored
44
vendor/github.com/containers/storage/pkg/mount/mount.go
generated
vendored
|
@ -1,7 +1,11 @@
|
||||||
package mount
|
package mount
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/fileutils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetMounts retrieves a list of mounts for the current running process.
|
// GetMounts retrieves a list of mounts for the current running process.
|
||||||
|
@ -17,6 +21,10 @@ func Mounted(mountpoint string) (bool, error) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
// Search the table for the mountpoint
|
// Search the table for the mountpoint
|
||||||
for _, e := range entries {
|
for _, e := range entries {
|
||||||
if e.Mountpoint == mountpoint {
|
if e.Mountpoint == mountpoint {
|
||||||
|
@ -46,13 +54,11 @@ func Mount(device, target, mType, options string) error {
|
||||||
// flags.go for supported option flags.
|
// flags.go for supported option flags.
|
||||||
func ForceMount(device, target, mType, options string) error {
|
func ForceMount(device, target, mType, options string) error {
|
||||||
flag, data := parseOptions(options)
|
flag, data := parseOptions(options)
|
||||||
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
|
return mount(device, target, mType, uintptr(flag), data)
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmount will unmount the target filesystem, so long as it is mounted.
|
// Unmount lazily unmounts a filesystem on supported platforms, otherwise
|
||||||
|
// does a normal unmount.
|
||||||
func Unmount(target string) error {
|
func Unmount(target string) error {
|
||||||
if mounted, err := Mounted(target); err != nil || !mounted {
|
if mounted, err := Mounted(target); err != nil || !mounted {
|
||||||
return err
|
return err
|
||||||
|
@ -60,6 +66,32 @@ func Unmount(target string) error {
|
||||||
return ForceUnmount(target)
|
return ForceUnmount(target)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RecursiveUnmount unmounts the target and all mounts underneath, starting with
|
||||||
|
// the deepsest mount first.
|
||||||
|
func RecursiveUnmount(target string) error {
|
||||||
|
mounts, err := GetMounts()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the deepest mount be first
|
||||||
|
sort.Sort(sort.Reverse(byMountpoint(mounts)))
|
||||||
|
|
||||||
|
for i, m := range mounts {
|
||||||
|
if !strings.HasPrefix(m.Mountpoint, target) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
|
||||||
|
if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Ignore errors for submounts and continue trying to unmount others
|
||||||
|
// The final unmount should fail if there ane any submounts remaining
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// ForceUnmount will force an unmount of the target filesystem, regardless if
|
// ForceUnmount will force an unmount of the target filesystem, regardless if
|
||||||
// it is mounted or not.
|
// it is mounted or not.
|
||||||
func ForceUnmount(target string) (err error) {
|
func ForceUnmount(target string) (err error) {
|
||||||
|
@ -70,5 +102,5 @@ func ForceUnmount(target string) (err error) {
|
||||||
}
|
}
|
||||||
time.Sleep(100 * time.Millisecond)
|
time.Sleep(100 * time.Millisecond)
|
||||||
}
|
}
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
5
vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
generated
vendored
|
@ -13,8 +13,9 @@ import "C"
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
func allocateIOVecs(options []string) []C.struct_iovec {
|
func allocateIOVecs(options []string) []C.struct_iovec {
|
||||||
|
@ -55,5 +56,5 @@ func mount(device, target, mType string, flag uintptr, data string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmount(target string, flag int) error {
|
func unmount(target string, flag int) error {
|
||||||
return syscall.Unmount(target, flag)
|
return unix.Unmount(target, flag)
|
||||||
}
|
}
|
||||||
|
|
52
vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
generated
vendored
52
vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
generated
vendored
|
@ -1,21 +1,57 @@
|
||||||
package mount
|
package mount
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
func mount(device, target, mType string, flag uintptr, data string) error {
|
const (
|
||||||
if err := syscall.Mount(device, target, mType, flag, data); err != nil {
|
// ptypes is the set propagation types.
|
||||||
return err
|
ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
|
||||||
|
|
||||||
|
// pflags is the full set valid flags for a change propagation call.
|
||||||
|
pflags = ptypes | unix.MS_REC | unix.MS_SILENT
|
||||||
|
|
||||||
|
// broflags is the combination of bind and read only
|
||||||
|
broflags = unix.MS_BIND | unix.MS_RDONLY
|
||||||
|
)
|
||||||
|
|
||||||
|
// isremount returns true if either device name or flags identify a remount request, false otherwise.
|
||||||
|
func isremount(device string, flags uintptr) bool {
|
||||||
|
switch {
|
||||||
|
// We treat device "" and "none" as a remount request to provide compatibility with
|
||||||
|
// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
|
||||||
|
case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have a bind mount or remount, remount...
|
func mount(device, target, mType string, flags uintptr, data string) error {
|
||||||
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
|
oflags := flags &^ ptypes
|
||||||
return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
|
if !isremount(device, flags) || data != "" {
|
||||||
|
// Initial call applying all non-propagation flags for mount
|
||||||
|
// or remount with changed data
|
||||||
|
if err := unix.Mount(device, target, mType, oflags, data); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if flags&ptypes != 0 {
|
||||||
|
// Change the propagation type.
|
||||||
|
if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oflags&broflags == broflags {
|
||||||
|
// Remount the bind to apply read only.
|
||||||
|
return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmount(target string, flag int) error {
|
func unmount(target string, flag int) error {
|
||||||
return syscall.Unmount(target, flag)
|
return unix.Unmount(target, flag)
|
||||||
}
|
}
|
||||||
|
|
3
vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
generated
vendored
|
@ -3,8 +3,9 @@
|
||||||
package mount
|
package mount
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// #include <stdlib.h>
|
// #include <stdlib.h>
|
||||||
|
|
14
vendor/github.com/containers/storage/pkg/mount/mountinfo.go
generated
vendored
14
vendor/github.com/containers/storage/pkg/mount/mountinfo.go
generated
vendored
|
@ -38,3 +38,17 @@ type Info struct {
|
||||||
// VfsOpts represents per super block options.
|
// VfsOpts represents per super block options.
|
||||||
VfsOpts string
|
VfsOpts string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type byMountpoint []*Info
|
||||||
|
|
||||||
|
func (by byMountpoint) Len() int {
|
||||||
|
return len(by)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (by byMountpoint) Less(i, j int) bool {
|
||||||
|
return by[i].Mountpoint < by[j].Mountpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (by byMountpoint) Swap(i, j int) {
|
||||||
|
by[i], by[j] = by[j], by[i]
|
||||||
|
}
|
||||||
|
|
58
vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go
generated
vendored
Normal file
58
vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
// +build solaris
|
||||||
|
|
||||||
|
package mount
|
||||||
|
|
||||||
|
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
|
||||||
|
// See the supported options in flags.go for further reference.
|
||||||
|
func MakeShared(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "shared")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
|
||||||
|
// See the supported options in flags.go for further reference.
|
||||||
|
func MakeRShared(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "rshared")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
|
||||||
|
// See the supported options in flags.go for further reference.
|
||||||
|
func MakePrivate(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "private")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
|
||||||
|
// enabled. See the supported options in flags.go for further reference.
|
||||||
|
func MakeRPrivate(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "rprivate")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
|
||||||
|
// See the supported options in flags.go for further reference.
|
||||||
|
func MakeSlave(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "slave")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
|
||||||
|
// See the supported options in flags.go for further reference.
|
||||||
|
func MakeRSlave(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "rslave")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
|
||||||
|
// enabled. See the supported options in flags.go for further reference.
|
||||||
|
func MakeUnbindable(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "unbindable")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
|
||||||
|
// option enabled. See the supported options in flags.go for further reference.
|
||||||
|
func MakeRUnbindable(mountPoint string) error {
|
||||||
|
return ensureMountedAs(mountPoint, "runbindable")
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureMountedAs(mountPoint, options string) error {
|
||||||
|
// TODO: Solaris does not support bind mounts.
|
||||||
|
// Evaluate lofs and also look at the relevant
|
||||||
|
// mount flags to be supported.
|
||||||
|
return nil
|
||||||
|
}
|
17
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
generated
vendored
17
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build linux freebsd solaris
|
// +build linux freebsd solaris openbsd
|
||||||
|
|
||||||
// Package kernel provides helper function to get, parse and compare kernel
|
// Package kernel provides helper function to get, parse and compare kernel
|
||||||
// versions for different platforms.
|
// versions for different platforms.
|
||||||
|
@ -6,6 +6,8 @@ package kernel
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetKernelVersion gets the current kernel version.
|
// GetKernelVersion gets the current kernel version.
|
||||||
|
@ -28,3 +30,16 @@ func GetKernelVersion() (*VersionInfo, error) {
|
||||||
|
|
||||||
return ParseRelease(string(release))
|
return ParseRelease(string(release))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CheckKernelVersion checks if current kernel is newer than (or equal to)
|
||||||
|
// the given version.
|
||||||
|
func CheckKernelVersion(k, major, minor int) bool {
|
||||||
|
if v, err := GetKernelVersion(); err != nil {
|
||||||
|
logrus.Warnf("error getting kernel version: %s", err)
|
||||||
|
} else {
|
||||||
|
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
21
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
generated
vendored
21
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
generated
vendored
|
@ -4,8 +4,9 @@ package kernel
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
// VersionInfo holds information about the kernel.
|
// VersionInfo holds information about the kernel.
|
||||||
|
@ -24,28 +25,28 @@ func (k *VersionInfo) String() string {
|
||||||
func GetKernelVersion() (*VersionInfo, error) {
|
func GetKernelVersion() (*VersionInfo, error) {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
h syscall.Handle
|
h windows.Handle
|
||||||
dwVersion uint32
|
dwVersion uint32
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
KVI := &VersionInfo{"Unknown", 0, 0, 0}
|
KVI := &VersionInfo{"Unknown", 0, 0, 0}
|
||||||
|
|
||||||
if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
|
if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
|
||||||
syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
|
windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
|
||||||
0,
|
0,
|
||||||
syscall.KEY_READ,
|
windows.KEY_READ,
|
||||||
&h); err != nil {
|
&h); err != nil {
|
||||||
return KVI, err
|
return KVI, err
|
||||||
}
|
}
|
||||||
defer syscall.RegCloseKey(h)
|
defer windows.RegCloseKey(h)
|
||||||
|
|
||||||
var buf [1 << 10]uint16
|
var buf [1 << 10]uint16
|
||||||
var typ uint32
|
var typ uint32
|
||||||
n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
|
n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
|
||||||
|
|
||||||
if err = syscall.RegQueryValueEx(h,
|
if err = windows.RegQueryValueEx(h,
|
||||||
syscall.StringToUTF16Ptr("BuildLabEx"),
|
windows.StringToUTF16Ptr("BuildLabEx"),
|
||||||
nil,
|
nil,
|
||||||
&typ,
|
&typ,
|
||||||
(*byte)(unsafe.Pointer(&buf[0])),
|
(*byte)(unsafe.Pointer(&buf[0])),
|
||||||
|
@ -53,11 +54,11 @@ func GetKernelVersion() (*VersionInfo, error) {
|
||||||
return KVI, err
|
return KVI, err
|
||||||
}
|
}
|
||||||
|
|
||||||
KVI.kvi = syscall.UTF16ToString(buf[:])
|
KVI.kvi = windows.UTF16ToString(buf[:])
|
||||||
|
|
||||||
// Important - docker.exe MUST be manifested for this API to return
|
// Important - docker.exe MUST be manifested for this API to return
|
||||||
// the correct information.
|
// the correct information.
|
||||||
if dwVersion, err = syscall.GetVersion(); err != nil {
|
if dwVersion, err = windows.GetVersion(); err != nil {
|
||||||
return KVI, err
|
return KVI, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
generated
vendored
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
generated
vendored
|
@ -1,18 +1,16 @@
|
||||||
package kernel
|
package kernel
|
||||||
|
|
||||||
import (
|
import "golang.org/x/sys/unix"
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Utsname represents the system name structure.
|
// Utsname represents the system name structure.
|
||||||
// It is passthrough for syscall.Utsname in order to make it portable with
|
// It is passthrough for unix.Utsname in order to make it portable with
|
||||||
// other platforms where it is not available.
|
// other platforms where it is not available.
|
||||||
type Utsname syscall.Utsname
|
type Utsname unix.Utsname
|
||||||
|
|
||||||
func uname() (*syscall.Utsname, error) {
|
func uname() (*unix.Utsname, error) {
|
||||||
uts := &syscall.Utsname{}
|
uts := &unix.Utsname{}
|
||||||
|
|
||||||
if err := syscall.Uname(uts); err != nil {
|
if err := unix.Uname(uts); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return uts, nil
|
return uts, nil
|
||||||
|
|
37
vendor/github.com/containers/storage/pkg/plugins/client.go
generated
vendored
37
vendor/github.com/containers/storage/pkg/plugins/client.go
generated
vendored
|
@ -19,8 +19,7 @@ const (
|
||||||
defaultTimeOut = 30
|
defaultTimeOut = 30
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewClient creates a new plugin client (http).
|
func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) {
|
||||||
func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
|
|
||||||
tr := &http.Transport{}
|
tr := &http.Transport{}
|
||||||
|
|
||||||
if tlsConfig != nil {
|
if tlsConfig != nil {
|
||||||
|
@ -45,15 +44,33 @@ func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
|
||||||
}
|
}
|
||||||
scheme := httpScheme(u)
|
scheme := httpScheme(u)
|
||||||
|
|
||||||
clientTransport := transport.NewHTTPTransport(tr, scheme, socket)
|
return transport.NewHTTPTransport(tr, scheme, socket), nil
|
||||||
return NewClientWithTransport(clientTransport), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientWithTransport creates a new plugin client with a given transport.
|
// NewClient creates a new plugin client (http).
|
||||||
func NewClientWithTransport(tr transport.Transport) *Client {
|
func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
|
||||||
|
clientTransport, err := newTransport(addr, tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newClientWithTransport(clientTransport, 0), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientWithTimeout creates a new plugin client (http).
|
||||||
|
func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) {
|
||||||
|
clientTransport, err := newTransport(addr, tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newClientWithTransport(clientTransport, timeout), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newClientWithTransport creates a new plugin client with a given transport.
|
||||||
|
func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client {
|
||||||
return &Client{
|
return &Client{
|
||||||
http: &http.Client{
|
http: &http.Client{
|
||||||
Transport: tr,
|
Transport: tr,
|
||||||
|
Timeout: timeout,
|
||||||
},
|
},
|
||||||
requestFactory: tr,
|
requestFactory: tr,
|
||||||
}
|
}
|
||||||
|
@ -112,15 +129,15 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
|
func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
|
||||||
|
var retries int
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
for {
|
||||||
req, err := c.requestFactory.NewRequest(serviceMethod, data)
|
req, err := c.requestFactory.NewRequest(serviceMethod, data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var retries int
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
for {
|
|
||||||
resp, err := c.http.Do(req)
|
resp, err := c.http.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !retry {
|
if !retry {
|
||||||
|
|
5
vendor/github.com/containers/storage/pkg/plugins/discovery.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/plugins/discovery.go
generated
vendored
|
@ -15,8 +15,7 @@ import (
|
||||||
var (
|
var (
|
||||||
// ErrNotFound plugin not found
|
// ErrNotFound plugin not found
|
||||||
ErrNotFound = errors.New("plugin not found")
|
ErrNotFound = errors.New("plugin not found")
|
||||||
socketsPath = "/run/containers/storage/plugins"
|
socketsPath = "/run/container/storage/plugins"
|
||||||
specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// localRegistry defines a registry that is local (using unix socket).
|
// localRegistry defines a registry that is local (using unix socket).
|
||||||
|
@ -116,7 +115,7 @@ func readPluginJSONInfo(name, path string) (*Plugin, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
p.name = name
|
p.name = name
|
||||||
if len(p.TLSConfig.CAFile) == 0 {
|
if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 {
|
||||||
p.TLSConfig.InsecureSkipVerify = true
|
p.TLSConfig.InsecureSkipVerify = true
|
||||||
}
|
}
|
||||||
p.activateWait = sync.NewCond(&sync.Mutex{})
|
p.activateWait = sync.NewCond(&sync.Mutex{})
|
||||||
|
|
5
vendor/github.com/containers/storage/pkg/plugins/discovery_unix.go
generated
vendored
Normal file
5
vendor/github.com/containers/storage/pkg/plugins/discovery_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
var specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"}
|
8
vendor/github.com/containers/storage/pkg/plugins/discovery_windows.go
generated
vendored
Normal file
8
vendor/github.com/containers/storage/pkg/plugins/discovery_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "containers", "storage", "plugins")}
|
118
vendor/github.com/containers/storage/pkg/plugins/plugins.go
generated
vendored
118
vendor/github.com/containers/storage/pkg/plugins/plugins.go
generated
vendored
|
@ -1,19 +1,17 @@
|
||||||
// Package plugins provides structures and helper functions to manage Docker
|
// Package plugins provides structures and helper functions to manage Docker
|
||||||
// plugins.
|
// plugins.
|
||||||
//
|
//
|
||||||
// Storage discovers plugins by looking for them in the plugin directory whenever
|
// Docker discovers plugins by looking for them in the plugin directory whenever
|
||||||
// a user or container tries to use one by name. UNIX domain socket files must
|
// a user or container tries to use one by name. UNIX domain socket files must
|
||||||
// be located under /run/containers/storage/plugins, whereas spec files can be
|
// be located under /run/container/storage/plugins, whereas spec files can be located
|
||||||
// located either under /etc/containers/storage/plugins or
|
// either under /etc/container/storage/plugins or /usr/lib/container/storage/plugins. This is handled
|
||||||
// /usr/lib/containers/storage/plugins. This is handled by the Registry
|
// by the Registry interface, which lets you list all plugins or get a plugin by
|
||||||
// interface, which lets you list all plugins or get a plugin by its name if it
|
// its name if it exists.
|
||||||
// exists.
|
|
||||||
//
|
//
|
||||||
// The plugins need to implement an HTTP server and bind this to the UNIX socket
|
// The plugins need to implement an HTTP server and bind this to the UNIX socket
|
||||||
// or the address specified in the spec files.
|
// or the address specified in the spec files.
|
||||||
// A handshake is send at /Plugin.Activate, and plugins are expected to return
|
// A handshake is send at /Plugin.Activate, and plugins are expected to return
|
||||||
// a Manifest with a list of subsystems which this plugin implements. As of
|
// a Manifest with a list of of Docker subsystems which this plugin implements.
|
||||||
// this writing, the known subsystem is "GraphDriver".
|
|
||||||
//
|
//
|
||||||
// In order to use a plugins, you can use the ``Get`` with the name of the
|
// In order to use a plugins, you can use the ``Get`` with the name of the
|
||||||
// plugin and the subsystem it implements.
|
// plugin and the subsystem it implements.
|
||||||
|
@ -43,9 +41,14 @@ type plugins struct {
|
||||||
plugins map[string]*Plugin
|
plugins map[string]*Plugin
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type extpointHandlers struct {
|
||||||
|
sync.RWMutex
|
||||||
|
extpointHandlers map[string][]func(string, *Client)
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
storage = plugins{plugins: make(map[string]*Plugin)}
|
storage = plugins{plugins: make(map[string]*Plugin)}
|
||||||
extpointHandlers = make(map[string]func(string, *Client))
|
handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))}
|
||||||
)
|
)
|
||||||
|
|
||||||
// Manifest lists what a plugin implements.
|
// Manifest lists what a plugin implements.
|
||||||
|
@ -54,7 +57,7 @@ type Manifest struct {
|
||||||
Implements []string
|
Implements []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Plugin is the definition of a storage plugin.
|
// Plugin is the definition of a container/storage plugin.
|
||||||
type Plugin struct {
|
type Plugin struct {
|
||||||
// Name of the plugin
|
// Name of the plugin
|
||||||
name string
|
name string
|
||||||
|
@ -67,12 +70,12 @@ type Plugin struct {
|
||||||
// Manifest of the plugin (see above)
|
// Manifest of the plugin (see above)
|
||||||
Manifest *Manifest `json:"-"`
|
Manifest *Manifest `json:"-"`
|
||||||
|
|
||||||
// error produced by activation
|
|
||||||
activateErr error
|
|
||||||
// specifies if the activation sequence is completed (not if it is successful or not)
|
|
||||||
activated bool
|
|
||||||
// wait for activation to finish
|
// wait for activation to finish
|
||||||
activateWait *sync.Cond
|
activateWait *sync.Cond
|
||||||
|
// error produced by activation
|
||||||
|
activateErr error
|
||||||
|
// keeps track of callback handlers run against this plugin
|
||||||
|
handlersRun bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the name of the plugin.
|
// Name returns the name of the plugin.
|
||||||
|
@ -85,6 +88,11 @@ func (p *Plugin) Client() *Client {
|
||||||
return p.client
|
return p.client
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsV1 returns true for V1 plugins and false otherwise.
|
||||||
|
func (p *Plugin) IsV1() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// NewLocalPlugin creates a new local plugin.
|
// NewLocalPlugin creates a new local plugin.
|
||||||
func NewLocalPlugin(name, addr string) *Plugin {
|
func NewLocalPlugin(name, addr string) *Plugin {
|
||||||
return &Plugin{
|
return &Plugin{
|
||||||
|
@ -98,19 +106,51 @@ func NewLocalPlugin(name, addr string) *Plugin {
|
||||||
|
|
||||||
func (p *Plugin) activate() error {
|
func (p *Plugin) activate() error {
|
||||||
p.activateWait.L.Lock()
|
p.activateWait.L.Lock()
|
||||||
if p.activated {
|
|
||||||
|
if p.activated() {
|
||||||
|
p.runHandlers()
|
||||||
p.activateWait.L.Unlock()
|
p.activateWait.L.Unlock()
|
||||||
return p.activateErr
|
return p.activateErr
|
||||||
}
|
}
|
||||||
|
|
||||||
p.activateErr = p.activateWithLock()
|
p.activateErr = p.activateWithLock()
|
||||||
p.activated = true
|
|
||||||
|
|
||||||
|
p.runHandlers()
|
||||||
p.activateWait.L.Unlock()
|
p.activateWait.L.Unlock()
|
||||||
p.activateWait.Broadcast()
|
p.activateWait.Broadcast()
|
||||||
return p.activateErr
|
return p.activateErr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// runHandlers runs the registered handlers for the implemented plugin types
|
||||||
|
// This should only be run after activation, and while the activation lock is held.
|
||||||
|
func (p *Plugin) runHandlers() {
|
||||||
|
if !p.activated() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
handlers.RLock()
|
||||||
|
if !p.handlersRun {
|
||||||
|
for _, iface := range p.Manifest.Implements {
|
||||||
|
hdlrs, handled := handlers.extpointHandlers[iface]
|
||||||
|
if !handled {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, handler := range hdlrs {
|
||||||
|
handler(p.name, p.client)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.handlersRun = true
|
||||||
|
}
|
||||||
|
handlers.RUnlock()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// activated returns if the plugin has already been activated.
|
||||||
|
// This should only be called with the activation lock held
|
||||||
|
func (p *Plugin) activated() bool {
|
||||||
|
return p.Manifest != nil
|
||||||
|
}
|
||||||
|
|
||||||
func (p *Plugin) activateWithLock() error {
|
func (p *Plugin) activateWithLock() error {
|
||||||
c, err := NewClient(p.Addr, p.TLSConfig)
|
c, err := NewClient(p.Addr, p.TLSConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -124,20 +164,12 @@ func (p *Plugin) activateWithLock() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Manifest = m
|
p.Manifest = m
|
||||||
|
|
||||||
for _, iface := range m.Implements {
|
|
||||||
handler, handled := extpointHandlers[iface]
|
|
||||||
if !handled {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
handler(p.name, p.client)
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Plugin) waitActive() error {
|
func (p *Plugin) waitActive() error {
|
||||||
p.activateWait.L.Lock()
|
p.activateWait.L.Lock()
|
||||||
for !p.activated {
|
for !p.activated() && p.activateErr == nil {
|
||||||
p.activateWait.Wait()
|
p.activateWait.Wait()
|
||||||
}
|
}
|
||||||
p.activateWait.L.Unlock()
|
p.activateWait.L.Unlock()
|
||||||
|
@ -145,7 +177,7 @@ func (p *Plugin) waitActive() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Plugin) implements(kind string) bool {
|
func (p *Plugin) implements(kind string) bool {
|
||||||
if err := p.waitActive(); err != nil {
|
if p.Manifest == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
for _, driver := range p.Manifest.Implements {
|
for _, driver := range p.Manifest.Implements {
|
||||||
|
@ -183,6 +215,10 @@ func loadWithRetry(name string, retry bool) (*Plugin, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
storage.Lock()
|
storage.Lock()
|
||||||
|
if pl, exists := storage.plugins[name]; exists {
|
||||||
|
storage.Unlock()
|
||||||
|
return pl, pl.activate()
|
||||||
|
}
|
||||||
storage.plugins[name] = pl
|
storage.plugins[name] = pl
|
||||||
storage.Unlock()
|
storage.Unlock()
|
||||||
|
|
||||||
|
@ -214,7 +250,7 @@ func Get(name, imp string) (*Plugin, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if pl.implements(imp) {
|
if err := pl.waitActive(); err == nil && pl.implements(imp) {
|
||||||
logrus.Debugf("%s implements: %s", name, imp)
|
logrus.Debugf("%s implements: %s", name, imp)
|
||||||
return pl, nil
|
return pl, nil
|
||||||
}
|
}
|
||||||
|
@ -223,7 +259,26 @@ func Get(name, imp string) (*Plugin, error) {
|
||||||
|
|
||||||
// Handle adds the specified function to the extpointHandlers.
|
// Handle adds the specified function to the extpointHandlers.
|
||||||
func Handle(iface string, fn func(string, *Client)) {
|
func Handle(iface string, fn func(string, *Client)) {
|
||||||
extpointHandlers[iface] = fn
|
handlers.Lock()
|
||||||
|
hdlrs, ok := handlers.extpointHandlers[iface]
|
||||||
|
if !ok {
|
||||||
|
hdlrs = []func(string, *Client){}
|
||||||
|
}
|
||||||
|
|
||||||
|
hdlrs = append(hdlrs, fn)
|
||||||
|
handlers.extpointHandlers[iface] = hdlrs
|
||||||
|
|
||||||
|
storage.Lock()
|
||||||
|
for _, p := range storage.plugins {
|
||||||
|
p.activateWait.L.Lock()
|
||||||
|
if p.activated() && p.implements(iface) {
|
||||||
|
p.handlersRun = false
|
||||||
|
}
|
||||||
|
p.activateWait.L.Unlock()
|
||||||
|
}
|
||||||
|
storage.Unlock()
|
||||||
|
|
||||||
|
handlers.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAll returns all the plugins for the specified implementation
|
// GetAll returns all the plugins for the specified implementation
|
||||||
|
@ -241,7 +296,10 @@ func GetAll(imp string) ([]*Plugin, error) {
|
||||||
chPl := make(chan *plLoad, len(pluginNames))
|
chPl := make(chan *plLoad, len(pluginNames))
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for _, name := range pluginNames {
|
for _, name := range pluginNames {
|
||||||
if pl, ok := storage.plugins[name]; ok {
|
storage.Lock()
|
||||||
|
pl, ok := storage.plugins[name]
|
||||||
|
storage.Unlock()
|
||||||
|
if ok {
|
||||||
chPl <- &plLoad{pl, nil}
|
chPl <- &plLoad{pl, nil}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -263,7 +321,7 @@ func GetAll(imp string) ([]*Plugin, error) {
|
||||||
logrus.Error(pl.err)
|
logrus.Error(pl.err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if pl.pl.implements(imp) {
|
if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) {
|
||||||
out = append(out, pl.pl)
|
out = append(out, pl.pl)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
9
vendor/github.com/containers/storage/pkg/plugins/plugins_unix.go
generated
vendored
Normal file
9
vendor/github.com/containers/storage/pkg/plugins/plugins_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
// BasePath returns the path to which all paths returned by the plugin are relative to.
|
||||||
|
// For v1 plugins, this always returns the host's root directory.
|
||||||
|
func (p *Plugin) BasePath() string {
|
||||||
|
return "/"
|
||||||
|
}
|
8
vendor/github.com/containers/storage/pkg/plugins/plugins_windows.go
generated
vendored
Normal file
8
vendor/github.com/containers/storage/pkg/plugins/plugins_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
package plugins
|
||||||
|
|
||||||
|
// BasePath returns the path to which all paths returned by the plugin are relative to.
|
||||||
|
// For Windows v1 plugins, this returns an empty string, since the plugin is already aware
|
||||||
|
// of the absolute path of the mount.
|
||||||
|
func (p *Plugin) BasePath() string {
|
||||||
|
return ""
|
||||||
|
}
|
71
vendor/github.com/containers/storage/pkg/random/random.go
generated
vendored
71
vendor/github.com/containers/storage/pkg/random/random.go
generated
vendored
|
@ -1,71 +0,0 @@
|
||||||
package random
|
|
||||||
|
|
||||||
import (
|
|
||||||
cryptorand "crypto/rand"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
"math/rand"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Rand is a global *rand.Rand instance, which initialized with NewSource() source.
|
|
||||||
var Rand = rand.New(NewSource())
|
|
||||||
|
|
||||||
// Reader is a global, shared instance of a pseudorandom bytes generator.
|
|
||||||
// It doesn't consume entropy.
|
|
||||||
var Reader io.Reader = &reader{rnd: Rand}
|
|
||||||
|
|
||||||
// copypaste from standard math/rand
|
|
||||||
type lockedSource struct {
|
|
||||||
lk sync.Mutex
|
|
||||||
src rand.Source
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lockedSource) Int63() (n int64) {
|
|
||||||
r.lk.Lock()
|
|
||||||
n = r.src.Int63()
|
|
||||||
r.lk.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lockedSource) Seed(seed int64) {
|
|
||||||
r.lk.Lock()
|
|
||||||
r.src.Seed(seed)
|
|
||||||
r.lk.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSource returns math/rand.Source safe for concurrent use and initialized
|
|
||||||
// with current unix-nano timestamp
|
|
||||||
func NewSource() rand.Source {
|
|
||||||
var seed int64
|
|
||||||
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
|
|
||||||
// This should not happen, but worst-case fallback to time-based seed.
|
|
||||||
seed = time.Now().UnixNano()
|
|
||||||
} else {
|
|
||||||
seed = cryptoseed.Int64()
|
|
||||||
}
|
|
||||||
return &lockedSource{
|
|
||||||
src: rand.NewSource(seed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type reader struct {
|
|
||||||
rnd *rand.Rand
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *reader) Read(b []byte) (int, error) {
|
|
||||||
i := 0
|
|
||||||
for {
|
|
||||||
val := r.rnd.Int63()
|
|
||||||
for val > 0 {
|
|
||||||
b[i] = byte(val)
|
|
||||||
i++
|
|
||||||
if i == len(b) {
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
val >>= 8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
5
vendor/github.com/containers/storage/pkg/reexec/README.md
generated
vendored
Normal file
5
vendor/github.com/containers/storage/pkg/reexec/README.md
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
# reexec
|
||||||
|
|
||||||
|
The `reexec` package facilitates the busybox style reexec of the docker binary that we require because
|
||||||
|
of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of
|
||||||
|
the exec of the binary will be used to find and execute custom init paths.
|
6
vendor/github.com/containers/storage/pkg/reexec/command_linux.go
generated
vendored
6
vendor/github.com/containers/storage/pkg/reexec/command_linux.go
generated
vendored
|
@ -5,6 +5,8 @@ package reexec
|
||||||
import (
|
import (
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Self returns the path to the current process's binary.
|
// Self returns the path to the current process's binary.
|
||||||
|
@ -13,7 +15,7 @@ func Self() string {
|
||||||
return "/proc/self/exe"
|
return "/proc/self/exe"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns *exec.Cmd which have Path as current binary. Also it setting
|
// Command returns *exec.Cmd which has Path as current binary. Also it setting
|
||||||
// SysProcAttr.Pdeathsig to SIGTERM.
|
// SysProcAttr.Pdeathsig to SIGTERM.
|
||||||
// This will use the in-memory version (/proc/self/exe) of the current binary,
|
// This will use the in-memory version (/proc/self/exe) of the current binary,
|
||||||
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
|
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
|
||||||
|
@ -22,7 +24,7 @@ func Command(args ...string) *exec.Cmd {
|
||||||
Path: Self(),
|
Path: Self(),
|
||||||
Args: args,
|
Args: args,
|
||||||
SysProcAttr: &syscall.SysProcAttr{
|
SysProcAttr: &syscall.SysProcAttr{
|
||||||
Pdeathsig: syscall.SIGTERM,
|
Pdeathsig: unix.SIGTERM,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
4
vendor/github.com/containers/storage/pkg/reexec/command_unix.go
generated
vendored
4
vendor/github.com/containers/storage/pkg/reexec/command_unix.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build freebsd solaris
|
// +build freebsd solaris darwin
|
||||||
|
|
||||||
package reexec
|
package reexec
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ func Self() string {
|
||||||
return naiveSelf()
|
return naiveSelf()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns *exec.Cmd which have Path as current binary.
|
// Command returns *exec.Cmd which has Path as current binary.
|
||||||
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
|
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
|
||||||
// be set to "/usr/bin/docker".
|
// be set to "/usr/bin/docker".
|
||||||
func Command(args ...string) *exec.Cmd {
|
func Command(args ...string) *exec.Cmd {
|
||||||
|
|
4
vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
generated
vendored
4
vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build !linux,!windows,!freebsd,!solaris
|
// +build !linux,!windows,!freebsd,!solaris,!darwin
|
||||||
|
|
||||||
package reexec
|
package reexec
|
||||||
|
|
||||||
|
@ -6,7 +6,7 @@ import (
|
||||||
"os/exec"
|
"os/exec"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Command is unsupported on operating systems apart from Linux and Windows.
|
// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin.
|
||||||
func Command(args ...string) *exec.Cmd {
|
func Command(args ...string) *exec.Cmd {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
2
vendor/github.com/containers/storage/pkg/reexec/command_windows.go
generated
vendored
2
vendor/github.com/containers/storage/pkg/reexec/command_windows.go
generated
vendored
|
@ -12,7 +12,7 @@ func Self() string {
|
||||||
return naiveSelf()
|
return naiveSelf()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Command returns *exec.Cmd which have Path as current binary.
|
// Command returns *exec.Cmd which has Path as current binary.
|
||||||
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
|
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
|
||||||
// be set to "C:\docker.exe".
|
// be set to "C:\docker.exe".
|
||||||
func Command(args ...string) *exec.Cmd {
|
func Command(args ...string) *exec.Cmd {
|
||||||
|
|
1
vendor/github.com/containers/storage/pkg/stringid/README.md
generated
vendored
Normal file
1
vendor/github.com/containers/storage/pkg/stringid/README.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
This package provides helper functions for dealing with string identifiers
|
60
vendor/github.com/containers/storage/pkg/stringid/stringid.go
generated
vendored
60
vendor/github.com/containers/storage/pkg/stringid/stringid.go
generated
vendored
|
@ -2,19 +2,25 @@
|
||||||
package stringid
|
package stringid
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
cryptorand "crypto/rand"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"math/rand"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
"github.com/containers/storage/pkg/random"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const shortLen = 12
|
const shortLen = 12
|
||||||
|
|
||||||
var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
|
var (
|
||||||
|
validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
|
||||||
|
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
|
||||||
|
)
|
||||||
|
|
||||||
// IsShortID determines if an arbitrary string *looks like* a short ID.
|
// IsShortID determines if an arbitrary string *looks like* a short ID.
|
||||||
func IsShortID(id string) bool {
|
func IsShortID(id string) bool {
|
||||||
|
@ -29,19 +35,14 @@ func TruncateID(id string) string {
|
||||||
if i := strings.IndexRune(id, ':'); i >= 0 {
|
if i := strings.IndexRune(id, ':'); i >= 0 {
|
||||||
id = id[i+1:]
|
id = id[i+1:]
|
||||||
}
|
}
|
||||||
trimTo := shortLen
|
if len(id) > shortLen {
|
||||||
if len(id) < shortLen {
|
id = id[:shortLen]
|
||||||
trimTo = len(id)
|
|
||||||
}
|
}
|
||||||
return id[:trimTo]
|
return id
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateID(crypto bool) string {
|
func generateID(r io.Reader) string {
|
||||||
b := make([]byte, 32)
|
b := make([]byte, 32)
|
||||||
r := random.Reader
|
|
||||||
if crypto {
|
|
||||||
r = rand.Reader
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
if _, err := io.ReadFull(r, b); err != nil {
|
if _, err := io.ReadFull(r, b); err != nil {
|
||||||
panic(err) // This shouldn't happen
|
panic(err) // This shouldn't happen
|
||||||
|
@ -59,13 +60,40 @@ func generateID(crypto bool) string {
|
||||||
|
|
||||||
// GenerateRandomID returns a unique id.
|
// GenerateRandomID returns a unique id.
|
||||||
func GenerateRandomID() string {
|
func GenerateRandomID() string {
|
||||||
return generateID(true)
|
return generateID(cryptorand.Reader)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GenerateNonCryptoID generates unique id without using cryptographically
|
// GenerateNonCryptoID generates unique id without using cryptographically
|
||||||
// secure sources of random.
|
// secure sources of random.
|
||||||
// It helps you to save entropy.
|
// It helps you to save entropy.
|
||||||
func GenerateNonCryptoID() string {
|
func GenerateNonCryptoID() string {
|
||||||
return generateID(false)
|
return generateID(readerFunc(rand.Read))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateID checks whether an ID string is a valid image ID.
|
||||||
|
func ValidateID(id string) error {
|
||||||
|
if ok := validHex.MatchString(id); !ok {
|
||||||
|
return fmt.Errorf("image ID %q is invalid", id)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// safely set the seed globally so we generate random ids. Tries to use a
|
||||||
|
// crypto seed before falling back to time.
|
||||||
|
var seed int64
|
||||||
|
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
|
||||||
|
// This should not happen, but worst-case fallback to time-based seed.
|
||||||
|
seed = time.Now().UnixNano()
|
||||||
|
} else {
|
||||||
|
seed = cryptoseed.Int64()
|
||||||
|
}
|
||||||
|
|
||||||
|
rand.Seed(seed)
|
||||||
|
}
|
||||||
|
|
||||||
|
type readerFunc func(p []byte) (int, error)
|
||||||
|
|
||||||
|
func (fn readerFunc) Read(p []byte) (int, error) {
|
||||||
|
return fn(p)
|
||||||
}
|
}
|
||||||
|
|
17
vendor/github.com/containers/storage/pkg/system/chtimes.go
generated
vendored
17
vendor/github.com/containers/storage/pkg/system/chtimes.go
generated
vendored
|
@ -2,26 +2,9 @@ package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
"unsafe"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
maxTime time.Time
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
|
|
||||||
// This is a 64 bit timespec
|
|
||||||
// os.Chtimes limits time to the following
|
|
||||||
maxTime = time.Unix(0, 1<<63-1)
|
|
||||||
} else {
|
|
||||||
// This is a 32 bit timespec
|
|
||||||
maxTime = time.Unix(1<<31-1, 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Chtimes changes the access time and modified time of a file at the given path
|
// Chtimes changes the access time and modified time of a file at the given path
|
||||||
func Chtimes(name string, atime time.Time, mtime time.Time) error {
|
func Chtimes(name string, atime time.Time, mtime time.Time) error {
|
||||||
unixMinTime := time.Unix(0, 0)
|
unixMinTime := time.Unix(0, 0)
|
||||||
|
|
19
vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
generated
vendored
19
vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
generated
vendored
|
@ -3,25 +3,26 @@
|
||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
//setCTime will set the create time on a file. On Windows, this requires
|
//setCTime will set the create time on a file. On Windows, this requires
|
||||||
//calling SetFileTime and explicitly including the create time.
|
//calling SetFileTime and explicitly including the create time.
|
||||||
func setCTime(path string, ctime time.Time) error {
|
func setCTime(path string, ctime time.Time) error {
|
||||||
ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
|
ctimespec := windows.NsecToTimespec(ctime.UnixNano())
|
||||||
pathp, e := syscall.UTF16PtrFromString(path)
|
pathp, e := windows.UTF16PtrFromString(path)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
h, e := syscall.CreateFile(pathp,
|
h, e := windows.CreateFile(pathp,
|
||||||
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
|
windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
|
||||||
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
defer syscall.Close(h)
|
defer windows.Close(h)
|
||||||
c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
|
c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
|
||||||
return syscall.SetFileTime(h, &c, nil, nil)
|
return windows.SetFileTime(h, &c, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
83
vendor/github.com/containers/storage/pkg/system/events_windows.go
generated
vendored
83
vendor/github.com/containers/storage/pkg/system/events_windows.go
generated
vendored
|
@ -1,83 +0,0 @@
|
||||||
package system
|
|
||||||
|
|
||||||
// This file implements syscalls for Win32 events which are not implemented
|
|
||||||
// in golang.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
procCreateEvent = modkernel32.NewProc("CreateEventW")
|
|
||||||
procOpenEvent = modkernel32.NewProc("OpenEventW")
|
|
||||||
procSetEvent = modkernel32.NewProc("SetEvent")
|
|
||||||
procResetEvent = modkernel32.NewProc("ResetEvent")
|
|
||||||
procPulseEvent = modkernel32.NewProc("PulseEvent")
|
|
||||||
)
|
|
||||||
|
|
||||||
// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
|
|
||||||
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
|
|
||||||
namep, _ := syscall.UTF16PtrFromString(name)
|
|
||||||
var _p1 uint32
|
|
||||||
if manualReset {
|
|
||||||
_p1 = 1
|
|
||||||
}
|
|
||||||
var _p2 uint32
|
|
||||||
if initialState {
|
|
||||||
_p2 = 1
|
|
||||||
}
|
|
||||||
r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
|
|
||||||
use(unsafe.Pointer(namep))
|
|
||||||
handle = syscall.Handle(r0)
|
|
||||||
if handle == syscall.InvalidHandle {
|
|
||||||
err = e1
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
|
|
||||||
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
|
|
||||||
namep, _ := syscall.UTF16PtrFromString(name)
|
|
||||||
var _p1 uint32
|
|
||||||
if inheritHandle {
|
|
||||||
_p1 = 1
|
|
||||||
}
|
|
||||||
r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
|
|
||||||
use(unsafe.Pointer(namep))
|
|
||||||
handle = syscall.Handle(r0)
|
|
||||||
if handle == syscall.InvalidHandle {
|
|
||||||
err = e1
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetEvent implements win32 SetEvent func in golang.
|
|
||||||
func SetEvent(handle syscall.Handle) (err error) {
|
|
||||||
return setResetPulse(handle, procSetEvent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetEvent implements win32 ResetEvent func in golang.
|
|
||||||
func ResetEvent(handle syscall.Handle) (err error) {
|
|
||||||
return setResetPulse(handle, procResetEvent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PulseEvent implements win32 PulseEvent func in golang.
|
|
||||||
func PulseEvent(handle syscall.Handle) (err error) {
|
|
||||||
return setResetPulse(handle, procPulseEvent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
|
|
||||||
r0, _, _ := proc.Call(uintptr(handle))
|
|
||||||
if r0 != 0 {
|
|
||||||
err = syscall.Errno(r0)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var temp unsafe.Pointer
|
|
||||||
|
|
||||||
// use ensures a variable is kept alive without the GC freeing while still needed
|
|
||||||
func use(p unsafe.Pointer) {
|
|
||||||
temp = p
|
|
||||||
}
|
|
33
vendor/github.com/containers/storage/pkg/system/exitcode.go
generated
vendored
Normal file
33
vendor/github.com/containers/storage/pkg/system/exitcode.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
package system
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetExitCode returns the ExitStatus of the specified error if its type is
|
||||||
|
// exec.ExitError, returns 0 and an error otherwise.
|
||||||
|
func GetExitCode(err error) (int, error) {
|
||||||
|
exitCode := 0
|
||||||
|
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||||
|
if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
||||||
|
return procExit.ExitStatus(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return exitCode, fmt.Errorf("failed to get exit code")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessExitCode process the specified error and returns the exit status code
|
||||||
|
// if the error was of type exec.ExitError, returns nothing otherwise.
|
||||||
|
func ProcessExitCode(err error) (exitCode int) {
|
||||||
|
if err != nil {
|
||||||
|
var exiterr error
|
||||||
|
if exitCode, exiterr = GetExitCode(err); exiterr != nil {
|
||||||
|
// TODO: Fix this so we check the error's text.
|
||||||
|
// we've failed to retrieve exit code, so we set it to 127
|
||||||
|
exitCode = 127
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
50
vendor/github.com/containers/storage/pkg/system/filesys.go
generated
vendored
50
vendor/github.com/containers/storage/pkg/system/filesys.go
generated
vendored
|
@ -3,13 +3,19 @@
|
||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
|
||||||
|
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
|
||||||
|
return MkdirAll(path, perm, sddl)
|
||||||
|
}
|
||||||
|
|
||||||
// MkdirAll creates a directory named path along with any necessary parents,
|
// MkdirAll creates a directory named path along with any necessary parents,
|
||||||
// with permission specified by attribute perm for all dir created.
|
// with permission specified by attribute perm for all dir created.
|
||||||
func MkdirAll(path string, perm os.FileMode) error {
|
func MkdirAll(path string, perm os.FileMode, sddl string) error {
|
||||||
return os.MkdirAll(path, perm)
|
return os.MkdirAll(path, perm)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,3 +23,45 @@ func MkdirAll(path string, perm os.FileMode) error {
|
||||||
func IsAbs(path string) bool {
|
func IsAbs(path string) bool {
|
||||||
return filepath.IsAbs(path)
|
return filepath.IsAbs(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The functions below here are wrappers for the equivalents in the os and ioutils packages.
|
||||||
|
// They are passthrough on Unix platforms, and only relevant on Windows.
|
||||||
|
|
||||||
|
// CreateSequential creates the named file with mode 0666 (before umask), truncating
|
||||||
|
// it if it already exists. If successful, methods on the returned
|
||||||
|
// File can be used for I/O; the associated file descriptor has mode
|
||||||
|
// O_RDWR.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func CreateSequential(name string) (*os.File, error) {
|
||||||
|
return os.Create(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenSequential opens the named file for reading. If successful, methods on
|
||||||
|
// the returned file can be used for reading; the associated file
|
||||||
|
// descriptor has mode O_RDONLY.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func OpenSequential(name string) (*os.File, error) {
|
||||||
|
return os.Open(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenFileSequential is the generalized open call; most users will use Open
|
||||||
|
// or Create instead. It opens the named file with specified flag
|
||||||
|
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
|
||||||
|
// methods on the returned File can be used for I/O.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||||
|
return os.OpenFile(name, flag, perm)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempFileSequential creates a new temporary file in the directory dir
|
||||||
|
// with a name beginning with prefix, opens the file for reading
|
||||||
|
// and writing, and returns the resulting *os.File.
|
||||||
|
// If dir is the empty string, TempFile uses the default directory
|
||||||
|
// for temporary files (see os.TempDir).
|
||||||
|
// Multiple programs calling TempFile simultaneously
|
||||||
|
// will not choose the same file. The caller can use f.Name()
|
||||||
|
// to find the pathname of the file. It is the caller's responsibility
|
||||||
|
// to remove the file when no longer needed.
|
||||||
|
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||||
|
return ioutil.TempFile(dir, prefix)
|
||||||
|
}
|
||||||
|
|
226
vendor/github.com/containers/storage/pkg/system/filesys_windows.go
generated
vendored
226
vendor/github.com/containers/storage/pkg/system/filesys_windows.go
generated
vendored
|
@ -6,17 +6,44 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
winio "github.com/Microsoft/go-winio"
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
|
||||||
|
SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
|
||||||
|
// SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
|
||||||
|
SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
|
||||||
|
// with an appropriate SDDL defined ACL.
|
||||||
|
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
|
||||||
|
return mkdirall(path, true, sddl)
|
||||||
|
}
|
||||||
|
|
||||||
// MkdirAll implementation that is volume path aware for Windows.
|
// MkdirAll implementation that is volume path aware for Windows.
|
||||||
func MkdirAll(path string, perm os.FileMode) error {
|
func MkdirAll(path string, _ os.FileMode, sddl string) error {
|
||||||
|
return mkdirall(path, false, sddl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mkdirall is a custom version of os.MkdirAll modified for use on Windows
|
||||||
|
// so that it is both volume path aware, and can create a directory with
|
||||||
|
// a DACL.
|
||||||
|
func mkdirall(path string, applyACL bool, sddl string) error {
|
||||||
if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
|
if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// The rest of this method is copied from os.MkdirAll and should be kept
|
// The rest of this method is largely copied from os.MkdirAll and should be kept
|
||||||
// as-is to ensure compatibility.
|
// as-is to ensure compatibility.
|
||||||
|
|
||||||
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
||||||
|
@ -45,14 +72,19 @@ func MkdirAll(path string, perm os.FileMode) error {
|
||||||
|
|
||||||
if j > 1 {
|
if j > 1 {
|
||||||
// Create parent
|
// Create parent
|
||||||
err = MkdirAll(path[0:j-1], perm)
|
err = mkdirall(path[0:j-1], false, sddl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parent now exists; invoke Mkdir and use its result.
|
// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
|
||||||
err = os.Mkdir(path, perm)
|
if applyACL {
|
||||||
|
err = mkdirWithACL(path, sddl)
|
||||||
|
} else {
|
||||||
|
err = os.Mkdir(path, 0)
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Handle arguments like "foo/." by
|
// Handle arguments like "foo/." by
|
||||||
// double-checking that directory doesn't exist.
|
// double-checking that directory doesn't exist.
|
||||||
|
@ -65,6 +97,35 @@ func MkdirAll(path string, perm os.FileMode) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mkdirWithACL creates a new directory. If there is an error, it will be of
|
||||||
|
// type *PathError. .
|
||||||
|
//
|
||||||
|
// This is a modified and combined version of os.Mkdir and windows.Mkdir
|
||||||
|
// in golang to cater for creating a directory am ACL permitting full
|
||||||
|
// access, with inheritance, to any subfolder/file for Built-in Administrators
|
||||||
|
// and Local System.
|
||||||
|
func mkdirWithACL(name string, sddl string) error {
|
||||||
|
sa := windows.SecurityAttributes{Length: 0}
|
||||||
|
sd, err := winio.SddlToSecurityDescriptor(sddl)
|
||||||
|
if err != nil {
|
||||||
|
return &os.PathError{Op: "mkdir", Path: name, Err: err}
|
||||||
|
}
|
||||||
|
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||||
|
sa.InheritHandle = 1
|
||||||
|
sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
|
||||||
|
|
||||||
|
namep, err := windows.UTF16PtrFromString(name)
|
||||||
|
if err != nil {
|
||||||
|
return &os.PathError{Op: "mkdir", Path: name, Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
e := windows.CreateDirectory(namep, &sa)
|
||||||
|
if e != nil {
|
||||||
|
return &os.PathError{Op: "mkdir", Path: name, Err: e}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
|
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
|
||||||
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
|
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
|
||||||
// as it doesn't start with a drive-letter/colon combination. However, in
|
// as it doesn't start with a drive-letter/colon combination. However, in
|
||||||
|
@ -80,3 +141,158 @@ func IsAbs(path string) bool {
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The origin of the functions below here are the golang OS and windows packages,
|
||||||
|
// slightly modified to only cope with files, not directories due to the
|
||||||
|
// specific use case.
|
||||||
|
//
|
||||||
|
// The alteration is to allow a file on Windows to be opened with
|
||||||
|
// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
|
||||||
|
// the standby list, particularly when accessing large files such as layer.tar.
|
||||||
|
|
||||||
|
// CreateSequential creates the named file with mode 0666 (before umask), truncating
|
||||||
|
// it if it already exists. If successful, methods on the returned
|
||||||
|
// File can be used for I/O; the associated file descriptor has mode
|
||||||
|
// O_RDWR.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func CreateSequential(name string) (*os.File, error) {
|
||||||
|
return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenSequential opens the named file for reading. If successful, methods on
|
||||||
|
// the returned file can be used for reading; the associated file
|
||||||
|
// descriptor has mode O_RDONLY.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func OpenSequential(name string) (*os.File, error) {
|
||||||
|
return OpenFileSequential(name, os.O_RDONLY, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenFileSequential is the generalized open call; most users will use Open
|
||||||
|
// or Create instead.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
|
||||||
|
if name == "" {
|
||||||
|
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
|
||||||
|
}
|
||||||
|
r, errf := windowsOpenFileSequential(name, flag, 0)
|
||||||
|
if errf == nil {
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
return nil, &os.PathError{Op: "open", Path: name, Err: errf}
|
||||||
|
}
|
||||||
|
|
||||||
|
func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
|
||||||
|
r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
return os.NewFile(uintptr(r), name), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeInheritSa() *windows.SecurityAttributes {
|
||||||
|
var sa windows.SecurityAttributes
|
||||||
|
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||||
|
sa.InheritHandle = 1
|
||||||
|
return &sa
|
||||||
|
}
|
||||||
|
|
||||||
|
func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
|
||||||
|
if len(path) == 0 {
|
||||||
|
return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
|
||||||
|
}
|
||||||
|
pathp, err := windows.UTF16PtrFromString(path)
|
||||||
|
if err != nil {
|
||||||
|
return windows.InvalidHandle, err
|
||||||
|
}
|
||||||
|
var access uint32
|
||||||
|
switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
|
||||||
|
case windows.O_RDONLY:
|
||||||
|
access = windows.GENERIC_READ
|
||||||
|
case windows.O_WRONLY:
|
||||||
|
access = windows.GENERIC_WRITE
|
||||||
|
case windows.O_RDWR:
|
||||||
|
access = windows.GENERIC_READ | windows.GENERIC_WRITE
|
||||||
|
}
|
||||||
|
if mode&windows.O_CREAT != 0 {
|
||||||
|
access |= windows.GENERIC_WRITE
|
||||||
|
}
|
||||||
|
if mode&windows.O_APPEND != 0 {
|
||||||
|
access &^= windows.GENERIC_WRITE
|
||||||
|
access |= windows.FILE_APPEND_DATA
|
||||||
|
}
|
||||||
|
sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
|
||||||
|
var sa *windows.SecurityAttributes
|
||||||
|
if mode&windows.O_CLOEXEC == 0 {
|
||||||
|
sa = makeInheritSa()
|
||||||
|
}
|
||||||
|
var createmode uint32
|
||||||
|
switch {
|
||||||
|
case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
|
||||||
|
createmode = windows.CREATE_NEW
|
||||||
|
case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
|
||||||
|
createmode = windows.CREATE_ALWAYS
|
||||||
|
case mode&windows.O_CREAT == windows.O_CREAT:
|
||||||
|
createmode = windows.OPEN_ALWAYS
|
||||||
|
case mode&windows.O_TRUNC == windows.O_TRUNC:
|
||||||
|
createmode = windows.TRUNCATE_EXISTING
|
||||||
|
default:
|
||||||
|
createmode = windows.OPEN_EXISTING
|
||||||
|
}
|
||||||
|
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
|
||||||
|
//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
|
||||||
|
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
|
||||||
|
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
|
||||||
|
return h, e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helpers for TempFileSequential
|
||||||
|
var rand uint32
|
||||||
|
var randmu sync.Mutex
|
||||||
|
|
||||||
|
func reseed() uint32 {
|
||||||
|
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
|
||||||
|
}
|
||||||
|
func nextSuffix() string {
|
||||||
|
randmu.Lock()
|
||||||
|
r := rand
|
||||||
|
if r == 0 {
|
||||||
|
r = reseed()
|
||||||
|
}
|
||||||
|
r = r*1664525 + 1013904223 // constants from Numerical Recipes
|
||||||
|
rand = r
|
||||||
|
randmu.Unlock()
|
||||||
|
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
|
||||||
|
// file access. Below is the original comment from golang:
|
||||||
|
// TempFile creates a new temporary file in the directory dir
|
||||||
|
// with a name beginning with prefix, opens the file for reading
|
||||||
|
// and writing, and returns the resulting *os.File.
|
||||||
|
// If dir is the empty string, TempFile uses the default directory
|
||||||
|
// for temporary files (see os.TempDir).
|
||||||
|
// Multiple programs calling TempFile simultaneously
|
||||||
|
// will not choose the same file. The caller can use f.Name()
|
||||||
|
// to find the pathname of the file. It is the caller's responsibility
|
||||||
|
// to remove the file when no longer needed.
|
||||||
|
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||||
|
if dir == "" {
|
||||||
|
dir = os.TempDir()
|
||||||
|
}
|
||||||
|
|
||||||
|
nconflict := 0
|
||||||
|
for i := 0; i < 10000; i++ {
|
||||||
|
name := filepath.Join(dir, prefix+nextSuffix())
|
||||||
|
f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||||
|
if os.IsExist(err) {
|
||||||
|
if nconflict++; nconflict > 10 {
|
||||||
|
randmu.Lock()
|
||||||
|
rand = reseed()
|
||||||
|
randmu.Unlock()
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
22
vendor/github.com/containers/storage/pkg/system/init.go
generated
vendored
Normal file
22
vendor/github.com/containers/storage/pkg/system/init.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
package system
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Used by chtimes
|
||||||
|
var maxTime time.Time
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// chtimes initialization
|
||||||
|
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
|
||||||
|
// This is a 64 bit timespec
|
||||||
|
// os.Chtimes limits time to the following
|
||||||
|
maxTime = time.Unix(0, 1<<63-1)
|
||||||
|
} else {
|
||||||
|
// This is a 32 bit timespec
|
||||||
|
maxTime = time.Unix(1<<31-1, 0)
|
||||||
|
}
|
||||||
|
}
|
17
vendor/github.com/containers/storage/pkg/system/init_windows.go
generated
vendored
Normal file
17
vendor/github.com/containers/storage/pkg/system/init_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package system
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
// LCOWSupported determines if Linux Containers on Windows are supported.
|
||||||
|
// Note: This feature is in development (06/17) and enabled through an
|
||||||
|
// environment variable. At a future time, it will be enabled based
|
||||||
|
// on build number. @jhowardmsft
|
||||||
|
var lcowSupported = false
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// LCOW initialization
|
||||||
|
if os.Getenv("LCOW_SUPPORTED") != "" {
|
||||||
|
lcowSupported = true
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
8
vendor/github.com/containers/storage/pkg/system/lcow_unix.go
generated
vendored
Normal file
8
vendor/github.com/containers/storage/pkg/system/lcow_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
// LCOWSupported returns true if Linux containers on Windows are supported.
|
||||||
|
func LCOWSupported() bool {
|
||||||
|
return false
|
||||||
|
}
|
6
vendor/github.com/containers/storage/pkg/system/lcow_windows.go
generated
vendored
Normal file
6
vendor/github.com/containers/storage/pkg/system/lcow_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
package system
|
||||||
|
|
||||||
|
// LCOWSupported returns true if Linux containers on Windows are supported.
|
||||||
|
func LCOWSupported() bool {
|
||||||
|
return lcowSupported
|
||||||
|
}
|
15
vendor/github.com/containers/storage/pkg/system/lstat_windows.go
generated
vendored
15
vendor/github.com/containers/storage/pkg/system/lstat_windows.go
generated
vendored
|
@ -1,25 +1,14 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import "os"
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Lstat calls os.Lstat to get a fileinfo interface back.
|
// Lstat calls os.Lstat to get a fileinfo interface back.
|
||||||
// This is then copied into our own locally defined structure.
|
// This is then copied into our own locally defined structure.
|
||||||
// Note the Linux version uses fromStatT to do the copy back,
|
|
||||||
// but that not strictly necessary when already in an OS specific module.
|
|
||||||
func Lstat(path string) (*StatT, error) {
|
func Lstat(path string) (*StatT, error) {
|
||||||
fi, err := os.Lstat(path)
|
fi, err := os.Lstat(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &StatT{
|
return fromStatT(&fi)
|
||||||
name: fi.Name(),
|
|
||||||
size: fi.Size(),
|
|
||||||
mode: fi.Mode(),
|
|
||||||
modTime: fi.ModTime(),
|
|
||||||
isDir: fi.IsDir()}, nil
|
|
||||||
}
|
}
|
||||||
|
|
3
vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// #cgo CFLAGS: -std=c99
|
||||||
// #cgo LDFLAGS: -lkstat
|
// #cgo LDFLAGS: -lkstat
|
||||||
// #include <unistd.h>
|
// #include <unistd.h>
|
||||||
// #include <stdlib.h>
|
// #include <stdlib.h>
|
||||||
|
@ -89,7 +90,7 @@ func ReadMemInfo() (*MemInfo, error) {
|
||||||
|
|
||||||
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
|
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
|
||||||
SwapFree < 0 {
|
SwapFree < 0 {
|
||||||
return nil, fmt.Errorf("Error getting system memory info %v\n", err)
|
return nil, fmt.Errorf("error getting system memory info %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
meminfo := &MemInfo{}
|
meminfo := &MemInfo{}
|
||||||
|
|
5
vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
generated
vendored
|
@ -1,12 +1,13 @@
|
||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||||
|
|
||||||
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
|
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
|
||||||
)
|
)
|
||||||
|
|
4
vendor/github.com/containers/storage/pkg/system/mknod.go
generated
vendored
4
vendor/github.com/containers/storage/pkg/system/mknod.go
generated
vendored
|
@ -3,13 +3,13 @@
|
||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"syscall"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Mknod creates a filesystem node (file, device special file or named pipe) named path
|
// Mknod creates a filesystem node (file, device special file or named pipe) named path
|
||||||
// with attributes specified by mode and dev.
|
// with attributes specified by mode and dev.
|
||||||
func Mknod(path string, mode uint32, dev int) error {
|
func Mknod(path string, mode uint32, dev int) error {
|
||||||
return syscall.Mknod(path, mode, dev)
|
return unix.Mknod(path, mode, dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
|
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
|
||||||
|
|
21
vendor/github.com/containers/storage/pkg/system/path.go
generated
vendored
Normal file
21
vendor/github.com/containers/storage/pkg/system/path.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package system
|
||||||
|
|
||||||
|
import "runtime"
|
||||||
|
|
||||||
|
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||||
|
|
||||||
|
// DefaultPathEnv is unix style list of directories to search for
|
||||||
|
// executables. Each directory is separated from the next by a colon
|
||||||
|
// ':' character .
|
||||||
|
func DefaultPathEnv(platform string) string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if platform != runtime.GOOS && LCOWSupported() {
|
||||||
|
return defaultUnixPathEnv
|
||||||
|
}
|
||||||
|
// Deliberately empty on Windows containers on Windows as the default path will be set by
|
||||||
|
// the container. Docker has no context of what the default path should be.
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return defaultUnixPathEnv
|
||||||
|
|
||||||
|
}
|
5
vendor/github.com/containers/storage/pkg/system/path_unix.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/system/path_unix.go
generated
vendored
|
@ -2,11 +2,6 @@
|
||||||
|
|
||||||
package system
|
package system
|
||||||
|
|
||||||
// DefaultPathEnv is unix style list of directories to search for
|
|
||||||
// executables. Each directory is separated from the next by a colon
|
|
||||||
// ':' character .
|
|
||||||
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
|
||||||
|
|
||||||
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
|
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
|
||||||
// is the system drive. This is a no-op on Linux.
|
// is the system drive. This is a no-op on Linux.
|
||||||
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
||||||
|
|
6
vendor/github.com/containers/storage/pkg/system/path_windows.go
generated
vendored
6
vendor/github.com/containers/storage/pkg/system/path_windows.go
generated
vendored
|
@ -8,15 +8,11 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
|
|
||||||
// the container. Docker has no context of what the default path should be.
|
|
||||||
const DefaultPathEnv = ""
|
|
||||||
|
|
||||||
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
|
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
|
||||||
// This is used, for example, when validating a user provided path in docker cp.
|
// This is used, for example, when validating a user provided path in docker cp.
|
||||||
// If a drive letter is supplied, it must be the system drive. The drive letter
|
// If a drive letter is supplied, it must be the system drive. The drive letter
|
||||||
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
|
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
|
||||||
// need the path in this syntax so that it can ultimately be contatenated with
|
// need the path in this syntax so that it can ultimately be concatenated with
|
||||||
// a Windows long-path which doesn't support drive-letters. Examples:
|
// a Windows long-path which doesn't support drive-letters. Examples:
|
||||||
// C: --> Fail
|
// C: --> Fail
|
||||||
// C:\ --> \
|
// C:\ --> \
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue