bump k8s@550f8be73aac92c7c23b1783d3db17f8660019f6

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-01-17 12:19:23 +01:00
parent 38acbb4625
commit 25a85afe1c
No known key found for this signature in database
GPG Key ID: B2BEAD150DE936B9
199 changed files with 2531 additions and 15378 deletions

View File

@@ -68,7 +68,8 @@ clone git github.com/opencontainers/runtime-tools master
clone git github.com/tchap/go-patricia v2.2.6
clone git github.com/rajatchopra/ocicni master
clone git github.com/containernetworking/cni master
-clone git k8s.io/kubernetes 43110dd64d058786e975ce30d4c12a4853d1778c https://github.com/kubernetes/kubernetes
+clone git k8s.io/kubernetes 550f8be73aac92c7c23b1783d3db17f8660019f6 https://github.com/kubernetes/kubernetes
clone git k8s.io/apimachinery master https://github.com/kubernetes/apimachinery
clone git google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go.git
clone git github.com/opencontainers/runtime-spec bb6925ea99f0e366a3f7d1c975f6577475ca25f0
clone git github.com/docker/distribution d22e09a6686c32be8c17b684b639da4b90efe320

View File

@@ -1,23 +0,0 @@
package blkiodev
import "fmt"
// WeightDevice is a structure that holds a device:weight pair
type WeightDevice struct {
Path string
Weight uint16
}
func (w *WeightDevice) String() string {
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// ThrottleDevice is a structure that holds a device:rate_per_second pair
type ThrottleDevice struct {
Path string
Rate uint64
}
func (t *ThrottleDevice) String() string {
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}
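
Aside: a hedged sketch of the rendered forms (the device path and values are illustrative; the import path is assumed, since the diff view omits file names):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/blkiodev"
)

func main() {
	w := blkiodev.WeightDevice{Path: "/dev/sda", Weight: 500}
	t := blkiodev.ThrottleDevice{Path: "/dev/sda", Rate: 1048576}
	fmt.Println(w.String()) // /dev/sda:500
	fmt.Println(t.String()) // /dev/sda:1048576
}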

View File

@@ -1,62 +0,0 @@
package container
import (
"time"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container; also supports user:group
AttachStdin bool // Attach the standard input, making user interaction possible
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variables to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // MAC address of the container
OnBuild []string // ONBUILD metadata that were defined in the image's Dockerfile
Labels map[string]string // List of labels set on this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
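
Aside: a hedged sketch building a Config with a CMD-SHELL health check, per the Test options listed above (the image name and command are illustrative; the import path is assumed):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	cfg := container.Config{
		Image: "nginx:latest",
		Cmd:   []string{"nginx", "-g", "daemon off;"},
		Env:   []string{"FOO=bar"},
		Healthcheck: &container.HealthConfig{
			// {"CMD-SHELL", command} runs the command with the system's default shell.
			Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
			Interval: 30 * time.Second,
			Timeout:  5 * time.Second,
			Retries:  3,
		},
	}
	fmt.Printf("%+v\n", cfg)
}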

View File

@@ -1,21 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerCreateCreatedBody container create created body
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@@ -1,17 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerUpdateOKBody container update o k body
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {
// warnings
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@@ -1,17 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerWaitOKBody container wait o k body
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {
// Exit code of the container
// Required: true
StatusCode int64 `json:"StatusCode"`
}

View File

@@ -1,333 +0,0 @@
package container
import (
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
// IsDefault indicates the default isolation technology of a container. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
}
// IpcMode represents the container ipc stack.
type IpcMode string
// IsPrivate indicates whether the container uses its private ipc stack.
func (n IpcMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's ipc stack.
func (n IpcMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's ipc stack.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the ipc stack is valid.
func (n IpcMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
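
Aside: the "container:<name>" convention parsed above recurs for several of the mode types in this file. A hedged sketch of the accessors (import path assumed):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	m := container.IpcMode("container:web")
	fmt.Println(m.Valid())       // true
	fmt.Println(m.IsContainer()) // true
	fmt.Println(m.Container())   // web

	bad := container.IpcMode("container:")
	fmt.Println(bad.Valid()) // false: the name part must be non-empty
}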
// UsernsMode represents userns mode in the container.
type UsernsMode string
// IsHost indicates whether the container uses the host's userns.
func (n UsernsMode) IsHost() bool {
return n == "host"
}
// IsPrivate indicates whether the container uses a private userns.
func (n UsernsMode) IsPrivate() bool {
return !(n.IsHost())
}
// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// CgroupSpec represents the cgroup to use for the container.
type CgroupSpec string
// IsContainer indicates whether the container is using another container's cgroup
func (c CgroupSpec) IsContainer() bool {
parts := strings.SplitN(string(c), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
return c.IsContainer() || c == ""
}
// Container returns the name of the container whose cgroup will be used.
func (c CgroupSpec) Container() string {
parts := strings.SplitN(string(c), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UTSMode represents the UTS namespace of the container.
type UTSMode string
// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !(n.IsHost())
}
// IsHost indicates whether the container uses the host's UTS namespace.
func (n UTSMode) IsHost() bool {
return n == "host"
}
// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// PidMode represents the pid namespace of the container.
type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's pid namespace.
func (n PidMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
PathOnHost string
PathInContainer string
CgroupPermissions string
}
// RestartPolicy represents the restart policies of the container.
type RestartPolicy struct {
Name string
MaximumRetryCount int
}
// IsNone indicates whether the container has the "no" restart policy.
// This means the container will not automatically restart when exiting.
func (rp *RestartPolicy) IsNone() bool {
return rp.Name == "no" || rp.Name == ""
}
// IsAlways indicates whether the container has the "always" restart policy.
// This means the container will automatically restart regardless of the exit status.
func (rp *RestartPolicy) IsAlways() bool {
return rp.Name == "always"
}
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
// This means the container will automatically restart if it exits with a non-zero exit status.
func (rp *RestartPolicy) IsOnFailure() bool {
return rp.Name == "on-failure"
}
// IsUnlessStopped indicates whether the container has the
// "unless-stopped" restart policy. This means the container will
// automatically restart unless the user has put it into a stopped state.
func (rp *RestartPolicy) IsUnlessStopped() bool {
return rp.Name == "unless-stopped"
}
// IsSame compares two RestartPolicy values to see if they are the same
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
Config map[string]string
}
// Resources contains container's resources (cgroups config, ulimits...)
type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
BlkioWeightDevice []*blkiodev.WeightDevice
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
CpusetCpus string // CPUs in which to allow execution (e.g. `0-2`, `0,1`)
CpusetMems string // Memory nodes (MEMs) in which to allow execution (e.g. `0-2`, `0,1`)
Devices []DeviceMapping // List of devices to map inside the container
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit int64 // Setting pids limit for a container
Ulimits []*units.Ulimit // List of ulimits to be set in the container
// Applicable to Windows
CPUCount int64 `json:"CpuCount"` // CPU count
CPUPercent int64 `json:"CpuPercent"` // CPU percent
IOMaximumIOps uint64 // Maximum IOps for the container system drive
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
}
// UpdateConfig holds the mutable attributes of a Container.
// Those attributes can be updated at runtime.
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}
// HostConfig is the non-portable Config structure of a container.
// Here, "non-portable" means "dependent of the host we are running on".
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other containers
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS servers to look up
DNSOptions []string `json:"DnsOptions"` // List of DNS options to use
DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to use
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Cgroup CgroupSpec // Cgroup to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed ports for the container
ReadonlyRootfs bool // Is the container root filesystem read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
Runtime string `json:",omitempty"` // Runtime to use with this container
// Applicable to Windows
ConsoleSize [2]uint // Initial console size (height,width)
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
// Mounts specs used by the container
Mounts []mount.Mount `json:",omitempty"`
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}
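
Aside: a hedged sketch tying together a few of the types above; the values are illustrative only:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := container.HostConfig{
		NetworkMode:   container.NetworkMode("bridge"),
		PidMode:       container.PidMode("host"),
		RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
		Resources: container.Resources{
			Memory:    512 * 1024 * 1024, // 512 MiB
			CPUShares: 512,
		},
	}
	fmt.Println(hc.RestartPolicy.IsOnFailure()) // true
	fmt.Println(hc.PidMode.IsHost())            // true
}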

View File

@@ -1,81 +0,0 @@
// +build !windows
package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
return "bridge"
} else if n.IsHost() {
return "host"
} else if n.IsContainer() {
return "container"
} else if n.IsNone() {
return "none"
} else if n.IsDefault() {
return "default"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
return n == "bridge"
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer returns the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// UserDefined returns the name of the user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
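
Aside: a hedged sketch (non-Windows build assumed, matching the build tag above) of how NetworkName resolves each mode:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	for _, m := range []container.NetworkMode{
		"bridge", "host", "none", "default",
		"container:web", // container mode: NetworkName is "container"
		"mynet",         // anything else is treated as a user-defined network
	} {
		fmt.Printf("%-14s -> %s\n", m, m.NetworkName())
	}
}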

View File

@@ -1,87 +0,0 @@
package container
import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// in windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
return n == "nat"
}
// IsHost indicates whether container uses the host network stack.
// returns false as this is not supported by windows
func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer returns the id of the container whose network this container is connected to.
// Returns a blank string on windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
}
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsDefault() {
return "default"
} else if n.IsBridge() {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// UserDefined returns the name of the user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View File

@@ -1,103 +0,0 @@
package mount
import (
"os"
)
// Type represents the type of a mount.
type Type string
// Type constants
const (
// TypeBind is the type for mounting host dir
TypeBind Type = "bind"
// TypeVolume is the type for remote storage volumes
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
)
// Mount represents a mount (volume).
type Mount struct {
Type Type `json:",omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
// Source is not supported for tmpfs (must be an empty value)
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
BindOptions *BindOptions `json:",omitempty"`
VolumeOptions *VolumeOptions `json:",omitempty"`
TmpfsOptions *TmpfsOptions `json:",omitempty"`
}
// Propagation represents the propagation of a mount.
type Propagation string
const (
// PropagationRPrivate RPRIVATE
PropagationRPrivate Propagation = "rprivate"
// PropagationPrivate PRIVATE
PropagationPrivate Propagation = "private"
// PropagationRShared RSHARED
PropagationRShared Propagation = "rshared"
// PropagationShared SHARED
PropagationShared Propagation = "shared"
// PropagationRSlave RSLAVE
PropagationRSlave Propagation = "rslave"
// PropagationSlave SLAVE
PropagationSlave Propagation = "slave"
)
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation Propagation `json:",omitempty"`
}
// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
NoCopy bool `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
DriverConfig *Driver `json:",omitempty"`
}
// Driver represents a volume driver.
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
// Size sets the size of the tmpfs, in bytes.
//
// This will be converted to an operating system specific value
// depending on the host. For example, on linux, it will be converted to
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
// docker, uses a straight byte value.
//
// Percentages are not supported.
SizeBytes int64 `json:",omitempty"`
// Mode of the tmpfs upon creation
Mode os.FileMode `json:",omitempty"`
// TODO(stevvooe): There are several more tmpfs flags, specified in the
// daemon, that are accepted. Only the most basic are added for now.
//
// From docker/docker/pkg/mount/flags.go:
//
// var validFlags = map[string]bool{
// "": true,
// "size": true, X
// "mode": true, X
// "uid": true,
// "gid": true,
// "nr_inodes": true,
// "nr_blocks": true,
// "mpol": true,
// }
//
// Some of these may be straightforward to add, but others, such as
// uid/gid have implications in a clustered system.
}
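
Aside: a hedged sketch composing a bind mount with rslave propagation and a tmpfs mount from the types above (paths and sizes are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	bind := mount.Mount{
		Type:   mount.TypeBind,
		Source: "/var/data",
		Target: "/data",
		BindOptions: &mount.BindOptions{
			Propagation: mount.PropagationRSlave,
		},
	}
	tmp := mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/run", // Source must stay empty for tmpfs
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024, // 64 MiB
			Mode:      0755,
		},
	}
	fmt.Println(bind.Type, tmp.Type) // bind tmpfs
}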

View File

@@ -1,30 +0,0 @@
package strslice
import "encoding/json"
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
// With no input, we preserve the existing value by returning nil and
// leaving the target alone. This allows defining default values for
// the type.
return nil
}
p := make([]string, 0, 1)
if err := json.Unmarshal(b, &p); err != nil {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
p = append(p, s)
}
*e = p
return nil
}
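
Aside: a hedged sketch of the two JSON forms the decoder accepts:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var a, b strslice.StrSlice
	_ = json.Unmarshal([]byte(`["sh", "-c", "echo hi"]`), &a) // array form
	_ = json.Unmarshal([]byte(`"echo hi"`), &b)               // bare string form
	fmt.Println(a) // [sh -c echo hi]
	fmt.Println(b) // [echo hi]
}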

View File

@@ -1,14 +0,0 @@
# Legacy API type versions
This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions lower than or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`.
## Package name conventions
The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
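For example, a hypothetical consumer of the `1.20` legacy types would import the package following this convention (the exact package contents are outside this diff):

import "github.com/docker/docker/api/types/versions/v1p20"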

View File

@@ -1,62 +0,0 @@
package versions
import (
"strconv"
"strings"
)
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
var (
currTab = strings.Split(v1, ".")
otherTab = strings.Split(v2, ".")
)
max := len(currTab)
if len(otherTab) > max {
max = len(otherTab)
}
for i := 0; i < max; i++ {
var currInt, otherInt int
if len(currTab) > i {
currInt, _ = strconv.Atoi(currTab[i])
}
if len(otherTab) > i {
otherInt, _ = strconv.Atoi(otherTab[i])
}
if currInt > otherInt {
return 1
}
if otherInt > currInt {
return -1
}
}
return 0
}
// LessThan checks if a version is less than another
func LessThan(v, other string) bool {
return compare(v, other) == -1
}
// LessThanOrEqualTo checks if a version is less than or equal to another
func LessThanOrEqualTo(v, other string) bool {
return compare(v, other) <= 0
}
// GreaterThan checks if a version is greater than another
func GreaterThan(v, other string) bool {
return compare(v, other) == 1
}
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
func GreaterThanOrEqualTo(v, other string) bool {
return compare(v, other) >= 0
}
// Equal checks if a version is equal to another
func Equal(v, other string) bool {
return compare(v, other) == 0
}
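
Aside: because each dot-separated component is compared numerically, "1.12" sorts above "1.9", and missing components count as 0. A hedged usage sketch (import path assumed):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.GreaterThan("1.12", "1.9")) // true: 12 > 9 numerically
	fmt.Println(versions.LessThan("1.19", "1.20"))   // true
	fmt.Println(versions.Equal("1.20", "1.20.0"))    // true: missing components count as 0
}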

View File

@@ -1,67 +0,0 @@
package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, check whether it is
// already mounted on the system; if it is, bump the count so the ref
// count reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.mu.Unlock()
return m.count
}
// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, check whether it is
// already mounted on the system; if it is, bump the count so the ref
// count reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.mu.Unlock()
return m.count
}
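
Aside: a hedged sketch with a stub Checker, showing the mount-aware first Increment described in the comments above (the import path is assumed, since the diff view omits file names):

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

// alwaysMounted is a stub Checker that reports every path as mounted.
type alwaysMounted struct{}

func (alwaysMounted) IsMounted(path string) bool { return true }

func main() {
	c := graphdriver.NewRefCounter(alwaysMounted{})
	fmt.Println(c.Increment("/mnt/layer")) // 2: 1 for the pre-existing mount + 1 for this caller
	fmt.Println(c.Decrement("/mnt/layer")) // 1
}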

View File

@@ -1,259 +0,0 @@
package graphdriver
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
)
// FsMagic is the unsigned id of the filesystem in use.
type FsMagic uint32
const (
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
// CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string
StorageOpt map[string]string
}
// InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code which chooses not to implement the entire Driver
// interface and instead uses the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container. Additional options can
// be passed in opts. parent may be "" and opts may be nil.
CreateReadWrite(id, parent string, opts *CreateOpts) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and options passed in opts. Parent
// may be "" and opts may be nil.
Create(id, parent string, opts *CreateOpts) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// GetMetadata returns a set of key-value pairs which give low level
// information about the image/container the driver is managing.
GetMetadata(id string) (map[string]string, error)
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
}
// DiffDriver is the interface to use to implement graph diffs
type DiffDriver interface {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (io.ReadCloser, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The io.Reader must be an uncompressed stream.
ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
DiffDriver
}
// DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface {
Driver
// DiffGetter returns an interface to efficiently retrieve the contents
// of files in a layer.
DiffGetter(id string) (FileGetCloser, error)
}
// FileGetCloser extends the storage.FileGetter interface with a Close method
// for cleaning up.
type FileGetCloser interface {
storage.FileGetter
// Close cleans up any resources associated with the FileGetCloser.
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
// GetDriver initializes and returns the registered driver
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap, pg plugingetter.PluginGetter) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
if pluginDriver, err := lookupPlugin(name, home, options, pg); err == nil {
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// New creates the driver and initializes it at the specified root.
func New(root, name string, options []string, uidMaps, gidMaps []idtools.IDMap, pg plugingetter.PluginGetter) (Driver, error) {
if name != "" {
logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
return GetDriver(name, root, options, uidMaps, gidMaps, pg)
}
// Guess for prior driver
driversMap := scanPriorDrivers(root)
for _, name := range priority {
if name == "vfs" {
// don't use vfs even if there is state present.
continue
}
if _, prior := driversMap[name]; prior {
// Of the state found from prior drivers, check in order of our
// priority which one we would prefer.
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's
// images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
return nil, err
}
// abort starting when there are other prior configured drivers
// to ensure the user explicitly selects the driver to load
if len(driversMap)-1 > 0 {
var driversSlice []string
for name := range driversMap {
driversSlice = append(driversSlice, name)
}
return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", "))
}
logrus.Infof("[graphdriver] using prior storage driver: %s", name)
return driver, nil
}
}
// Check for priority drivers first
for _, name := range priority {
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
return nil, fmt.Errorf("No supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
func scanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil && driver != "vfs" {
driversMap[driver] = true
}
}
return driversMap
}
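
Aside: drivers hook into this registry at package init time. A minimal hedged sketch of the pattern (the driver name `mydriver` and its no-op InitFunc are hypothetical; import paths are assumed, as the diff view omits file names):

package mydriver

import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/idtools"
)

func init() {
	// Register wires the InitFunc under a name so New/GetDriver can find it.
	graphdriver.Register("mydriver", Init)
}

// Init is the InitFunc; a real driver would set up its state under root here.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	return nil, graphdriver.ErrNotSupported // placeholder: report unsupported
}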

View File

@@ -1,19 +0,0 @@
package graphdriver
import "syscall"
var (
// Slice of drivers that should be used in order of preference
priority = []string{
"zfs",
}
)
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View File

@@ -1,135 +0,0 @@
// +build linux
package graphdriver
import (
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
const (
// FsMagicAufs filesystem id for Aufs
FsMagicAufs = FsMagic(0x61756673)
// FsMagicBtrfs filesystem id for Btrfs
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem id for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
FsMagicJfs = FsMagic(0x3153464a)
// FsMagicNfsFs filesystem id for NfsFs
FsMagicNfsFs = FsMagic(0x00006969)
// FsMagicRAMFs filesystem id for RamFs
FsMagicRAMFs = FsMagic(0x858458f6)
// FsMagicReiserFs filesystem id for ReiserFs
FsMagicReiserFs = FsMagic(0x52654973)
// FsMagicSmbFs filesystem id for SmbFs
FsMagicSmbFs = FsMagic(0x0000517B)
// FsMagicSquashFs filesystem id for SquashFs
FsMagicSquashFs = FsMagic(0x73717368)
// FsMagicTmpFs filesystem id for TmpFs
FsMagicTmpFs = FsMagic(0x01021994)
// FsMagicVxFS filesystem id for VxFs
FsMagicVxFS = FsMagic(0xa501fcf5)
// FsMagicXfs filesystem id for Xfs
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
)
var (
// Slice of drivers that should be used in order of preference
priority = []string{
"aufs",
"btrfs",
"zfs",
"overlay2",
"overlay",
"devicemapper",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicOverlay: "overlayfs",
FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicVxFS: "vxfs",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}
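
Aside: a hedged sketch using Mounted with one of the magic numbers above to ask whether a directory sits on xfs (Linux-only, per this file's build tag; the path is illustrative and the import path assumed):

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

func main() {
	mounted, err := graphdriver.Mounted(graphdriver.FsMagicXfs, "/var/lib/docker")
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Println("on xfs:", mounted) // true only if the backing fs is xfs
}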

View File

@@ -1,97 +0,0 @@
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/mount"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in order of preference
priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
// No-op on Solaris.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
buf := C.getstatfs(cs)
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
C.free(unsafe.Pointer(buf))
return false, ErrPrerequisites
}
C.free(unsafe.Pointer(buf))
C.free(unsafe.Pointer(cs))
return true, nil
}

View File

@@ -1,15 +0,0 @@
// +build !linux,!windows,!freebsd,!solaris
package graphdriver
var (
// Slice of drivers that should be used in order of preference
priority = []string{
"unsupported",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}

View File

@@ -1,14 +0,0 @@
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"windowsfilter",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}

View File

@@ -1,169 +0,0 @@
package graphdriver
import (
"io"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
)
var (
// ApplyUncompressedLayer defines the unpack method used by the graph
// driver.
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
)
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
// support on its own. See the comment on the exported
// NewNaiveDiffDriver function below.
// Notably, the AUFS driver doesn't need to be wrapped like this.
type NaiveDiffDriver struct {
ProtoDriver
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
// Diff(id, parent string) (io.ReadCloser, error)
// Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
return &NaiveDiffDriver{ProtoDriver: driver,
uidMaps: uidMaps,
gidMaps: gidMaps}
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
startTime := time.Now()
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil {
return nil, err
}
defer func() {
if err != nil {
driver.Put(id)
}
}()
if parent == "" {
archive, err := archive.Tar(layerFs, archive.Uncompressed)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driver.Put(id)
return err
}), nil
}
parentFs, err := driver.Get(parent, "")
if err != nil {
return nil, err
}
defer driver.Put(parent)
changes, err := archive.ChangesDirs(layerFs, parentFs)
if err != nil {
return nil, err
}
archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driver.Put(id)
// NaiveDiffDriver compares file metadata with parent layers. Parent layers
// are extracted from tars with full-second precision on modified time.
// We need this hack to make sure calls within the same second receive
// the correct result.
time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil {
return nil, err
}
defer driver.Put(id)
parentFs := ""
if parent != "" {
parentFs, err = driver.Get(parent, "")
if err != nil {
return nil, err
}
defer driver.Put(parent)
}
return archive.ChangesDirs(layerFs, parentFs)
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
driver := gdw.ProtoDriver
// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := driver.Get(id, "")
if err != nil {
return
}
defer driver.Put(id)
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
driver := gdw.ProtoDriver
changes, err := gdw.Changes(id, parent)
if err != nil {
return
}
layerFs, err := driver.Get(id, "")
if err != nil {
return
}
defer driver.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}

View File

@@ -1,31 +0,0 @@
package graphdriver
import (
"fmt"
"io"
"path/filepath"
"github.com/docker/docker/pkg/plugingetter"
)
type pluginClient interface {
// Call calls the specified method with the specified arguments for the plugin.
Call(string, interface{}, interface{}) error
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
Stream(string, interface{}) (io.ReadCloser, error)
// SendFile calls the specified method, and passes through the IO stream
SendFile(string, io.Reader, interface{}) error
}
func lookupPlugin(name, home string, opts []string, pg plugingetter.PluginGetter) (Driver, error) {
pl, err := pg.Get(name, "GraphDriver", plugingetter.LOOKUP)
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
proxy := &graphDriverProxy{name, c}
return proxy, proxy.Init(filepath.Join(home, name), opts)
}

View File

@@ -1,233 +0,0 @@
package graphdriver
import (
"errors"
"fmt"
"io"
"github.com/docker/docker/pkg/archive"
)
type graphDriverProxy struct {
name string
client pluginClient
}
type graphDriverRequest struct {
ID string `json:",omitempty"`
Parent string `json:",omitempty"`
MountLabel string `json:",omitempty"`
}
type graphDriverResponse struct {
Err string `json:",omitempty"`
Dir string `json:",omitempty"`
Exists bool `json:",omitempty"`
Status [][2]string `json:",omitempty"`
Changes []archive.Change `json:",omitempty"`
Size int64 `json:",omitempty"`
Metadata map[string]string `json:",omitempty"`
}
type graphDriverInitRequest struct {
Home string
Opts []string
}
func (d *graphDriverProxy) Init(home string, opts []string) error {
args := &graphDriverInitRequest{
Home: home,
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error {
mountLabel := ""
if opts != nil {
mountLabel = opts.MountLabel
}
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
mountLabel := ""
if opts != nil {
mountLabel = opts.MountLabel
}
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
args := &graphDriverRequest{
ID: id,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
}
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
}
func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
args := &graphDriverRequest{
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Metadata, nil
}
func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
return body, nil
}
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Changes, nil
}
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
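
Aside: the method names above ("GraphDriver.Init", "GraphDriver.Create", ...) travel over the plugin transport with JSON bodies shaped like graphDriverRequest. A hedged sketch of what a plugin receives for GraphDriver.Create (the local `request` struct merely mirrors the unexported one above):

package main

import (
	"encoding/json"
	"fmt"
)

// request mirrors the unexported graphDriverRequest for illustration only.
type request struct {
	ID         string `json:",omitempty"`
	Parent     string `json:",omitempty"`
	MountLabel string `json:",omitempty"`
}

func main() {
	b, _ := json.Marshal(request{ID: "abc123", Parent: "def456"})
	fmt.Println(string(b)) // {"ID":"abc123","Parent":"def456"}
}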

View File

@@ -1,173 +0,0 @@
package image
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
// DigestWalkFunc is the function called by StoreBackend.Walk
type DigestWalkFunc func(id digest.Digest) error
// StoreBackend provides an interface for image.Store persistence
type StoreBackend interface {
Walk(f DigestWalkFunc) error
Get(id digest.Digest) ([]byte, error)
Set(data []byte) (digest.Digest, error)
Delete(id digest.Digest) error
SetMetadata(id digest.Digest, key string, data []byte) error
GetMetadata(id digest.Digest, key string) ([]byte, error)
DeleteMetadata(id digest.Digest, key string) error
}
// fs implements StoreBackend using the filesystem.
type fs struct {
sync.RWMutex
root string
}
const (
contentDirName = "content"
metadataDirName = "metadata"
)
// NewFSStoreBackend returns a new filesystem-based backend for image.Store
func NewFSStoreBackend(root string) (StoreBackend, error) {
return newFSStore(root)
}
func newFSStore(root string) (*fs, error) {
s := &fs{
root: root,
}
if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
return nil, err
}
return s, nil
}
func (s *fs) contentFile(dgst digest.Digest) string {
return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
}
func (s *fs) metadataDir(dgst digest.Digest) string {
return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
}
// Walk calls the supplied callback for each image ID in the storage backend.
func (s *fs) Walk(f DigestWalkFunc) error {
// Only Canonical digest (sha256) is currently supported
s.RLock()
dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
s.RUnlock()
if err != nil {
return err
}
for _, v := range dir {
dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
continue
}
if err := f(dgst); err != nil {
return err
}
}
return nil
}
// Get returns the content stored under a given digest.
func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
s.RLock()
defer s.RUnlock()
return s.get(dgst)
}
func (s *fs) get(dgst digest.Digest) ([]byte, error) {
content, err := ioutil.ReadFile(s.contentFile(dgst))
if err != nil {
return nil, err
}
// todo: maybe optional
if digest.FromBytes(content) != dgst {
return nil, fmt.Errorf("failed to verify: %v", dgst)
}
return content, nil
}
// Set stores content by checksum.
func (s *fs) Set(data []byte) (digest.Digest, error) {
s.Lock()
defer s.Unlock()
if len(data) == 0 {
return "", fmt.Errorf("Invalid empty data")
}
dgst := digest.FromBytes(data)
if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
return "", err
}
return dgst, nil
}
// Delete removes content and metadata files associated with the digest.
func (s *fs) Delete(dgst digest.Digest) error {
s.Lock()
defer s.Unlock()
if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
return err
}
if err := os.Remove(s.contentFile(dgst)); err != nil {
return err
}
return nil
}
// SetMetadata sets metadata for a given ID. It fails if there's no base file.
func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
s.Lock()
defer s.Unlock()
if _, err := s.get(dgst); err != nil {
return err
}
baseDir := filepath.Join(s.metadataDir(dgst))
if err := os.MkdirAll(baseDir, 0700); err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
}
// GetMetadata returns metadata for a given digest.
func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
s.RLock()
defer s.RUnlock()
if _, err := s.get(dgst); err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
}
// DeleteMetadata removes the metadata associated with a digest.
func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
s.Lock()
defer s.Unlock()
return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
}
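
The backend above is a plain content-addressed store: bytes live at root/content/<algorithm>/<hex>, named by their own digest, and get re-verifies content against its name on every read. A compilable sketch of the same layout using only the standard library — crypto/sha256 stands in for the vendored digest package, and the paths and file modes follow the code above:

package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"path/filepath"
)

// set writes data under root/content/sha256/<hex-of-sha256(data)> and
// returns the hex digest, mirroring fs.Set above.
func set(root string, data []byte) (string, error) {
	hex := fmt.Sprintf("%x", sha256.Sum256(data))
	dir := filepath.Join(root, "content", "sha256")
	if err := os.MkdirAll(dir, 0700); err != nil {
		return "", err
	}
	return hex, os.WriteFile(filepath.Join(dir, hex), data, 0600)
}

// get reads the content back and re-verifies it against its own name,
// the same check fs.get performs before returning.
func get(root, hex string) ([]byte, error) {
	data, err := os.ReadFile(filepath.Join(root, "content", "sha256", hex))
	if err != nil {
		return nil, err
	}
	if fmt.Sprintf("%x", sha256.Sum256(data)) != hex {
		return nil, fmt.Errorf("failed to verify: %s", hex)
	}
	return data, nil
}

func main() {
	root, _ := os.MkdirTemp("", "store")
	defer os.RemoveAll(root)
	dgst, _ := set(root, []byte(`{"architecture":"amd64"}`))
	data, _ := get(root, dgst)
	fmt.Println(dgst, string(data))
}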

View File

@@ -1,150 +0,0 @@
package image
import (
"encoding/json"
"errors"
"io"
"time"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types/container"
)
// ID is the content-addressable ID of an image.
type ID digest.Digest
func (id ID) String() string {
return id.Digest().String()
}
// Digest converts ID into a digest
func (id ID) Digest() digest.Digest {
return digest.Digest(id)
}
// IDFromDigest creates an ID from a digest
func IDFromDigest(digest digest.Digest) ID {
return ID(digest)
}
// V1Image stores the V1 image configuration.
type V1Image struct {
// ID is a unique 64-character identifier of the image
ID string `json:"id,omitempty"`
// Parent is the ID of the parent image
Parent string `json:"parent,omitempty"`
// Comment is a user-added comment
Comment string `json:"comment,omitempty"`
// Created is the timestamp at which the image was created
Created time.Time `json:"created"`
// Container is the ID of the container used to commit
Container string `json:"container,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image
ContainerConfig container.Config `json:"container_config,omitempty"`
// DockerVersion specifies the version of Docker that built the image
DockerVersion string `json:"docker_version,omitempty"`
// Author of the image
Author string `json:"author,omitempty"`
// Config is the configuration of the container received from the client
Config *container.Config `json:"config,omitempty"`
// Architecture is the hardware on which the image is built and runs
Architecture string `json:"architecture,omitempty"`
// OS is the operating system used to build and run the image
OS string `json:"os,omitempty"`
// Size is the total size of the image including all layers it is composed of
Size int64 `json:",omitempty"`
}
// Image stores the image configuration
type Image struct {
V1Image
Parent ID `json:"parent,omitempty"`
RootFS *RootFS `json:"rootfs,omitempty"`
History []History `json:"history,omitempty"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
// rawJSON caches the immutable JSON associated with this image.
rawJSON []byte
// computedID is the ID computed from the hash of the image config.
// Not to be confused with the legacy V1 ID in V1Image.
computedID ID
}
// RawJSON returns the immutable JSON associated with the image.
func (img *Image) RawJSON() []byte {
return img.rawJSON
}
// ID returns the image's content-addressable ID.
func (img *Image) ID() ID {
return img.computedID
}
// ImageID stringifies ID.
func (img *Image) ImageID() string {
return img.ID().String()
}
// RunConfig returns the image's container config.
func (img *Image) RunConfig() *container.Config {
return img.Config
}
// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
// that JSON that's been manipulated by a push/pull cycle with a legacy
// registry won't end up with a different key order.
func (img *Image) MarshalJSON() ([]byte, error) {
type MarshalImage Image
pass1, err := json.Marshal(MarshalImage(*img))
if err != nil {
return nil, err
}
var c map[string]*json.RawMessage
if err := json.Unmarshal(pass1, &c); err != nil {
return nil, err
}
return json.Marshal(c)
}
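
The two-pass trick in MarshalJSON works because encoding/json always writes map keys in sorted order, so any two encodings of the same config converge to one byte stream. A small, self-contained demonstration — the img struct and its values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Field order in the struct controls the first encoding...
	type img struct {
		Parent string `json:"parent"`
		ID     string `json:"id"`
	}
	pass1, _ := json.Marshal(img{Parent: "p", ID: "i"})
	fmt.Println(string(pass1)) // {"parent":"p","id":"i"}

	// ...but re-encoding through a map sorts the top-level keys,
	// giving a canonical byte stream regardless of origin.
	var c map[string]*json.RawMessage
	json.Unmarshal(pass1, &c)
	pass2, _ := json.Marshal(c)
	fmt.Println(string(pass2)) // {"id":"i","parent":"p"}
}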
// History stores build commands that were used to create an image
type History struct {
// Created timestamp for build point
Created time.Time `json:"created"`
// Author of the build point
Author string `json:"author,omitempty"`
// CreatedBy keeps the Dockerfile command used while building image.
CreatedBy string `json:"created_by,omitempty"`
// Comment is custom message set by the user when creating the image.
Comment string `json:"comment,omitempty"`
// EmptyLayer is set to true if this history item did not generate a
// layer. Otherwise, the history item is associated with the next
// layer in the RootFS section.
EmptyLayer bool `json:"empty_layer,omitempty"`
}
// Exporter provides an interface for exporting and importing images
type Exporter interface {
Load(io.ReadCloser, io.Writer, bool) error
// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
Save([]string, io.Writer) error
}
// NewFromJSON creates an Image configuration from json.
func NewFromJSON(src []byte) (*Image, error) {
img := &Image{}
if err := json.Unmarshal(src, img); err != nil {
return nil, err
}
if img.RootFS == nil {
return nil, errors.New("Invalid image JSON, no RootFS key.")
}
img.rawJSON = src
return img, nil
}

View File

@@ -1,44 +0,0 @@
package image
import (
"runtime"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/layer"
)
// TypeLayers is used for RootFS.Type for filesystems organized into layers.
const TypeLayers = "layers"
// typeLayersWithBase is an older format used by Windows up to v1.12. We
// explicitly handle this as an error case to ensure that a daemon which still
// has an older image like this on disk can still start, even though the
// image itself is not usable. See https://github.com/docker/docker/pull/25806.
const typeLayersWithBase = "layers+base"
// RootFS describes the image's root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type RootFS struct {
Type string `json:"type"`
DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
}
// NewRootFS returns an empty RootFS struct
func NewRootFS() *RootFS {
return &RootFS{Type: TypeLayers}
}
// Append appends a new diffID to the rootfs
func (r *RootFS) Append(id layer.DiffID) {
r.DiffIDs = append(r.DiffIDs, id)
}
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
return ""
}
return layer.CreateChainID(r.DiffIDs)
}

View File

@@ -1,295 +0,0 @@
package image
import (
"encoding/json"
"errors"
"fmt"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/layer"
)
// Store is an interface for creating and accessing images
type Store interface {
Create(config []byte) (ID, error)
Get(id ID) (*Image, error)
Delete(id ID) ([]layer.Metadata, error)
Search(partialID string) (ID, error)
SetParent(id ID, parent ID) error
GetParent(id ID) (ID, error)
Children(id ID) []ID
Map() map[ID]*Image
Heads() map[ID]*Image
}
// LayerGetReleaser is a minimal interface for getting and releasing layers.
type LayerGetReleaser interface {
Get(layer.ChainID) (layer.Layer, error)
Release(layer.Layer) ([]layer.Metadata, error)
}
type imageMeta struct {
layer layer.Layer
children map[ID]struct{}
}
type store struct {
sync.Mutex
ls LayerGetReleaser
images map[ID]*imageMeta
fs StoreBackend
digestSet *digest.Set
}
// NewImageStore returns a new store object for the given layer store
func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
is := &store{
ls: ls,
images: make(map[ID]*imageMeta),
fs: fs,
digestSet: digest.NewSet(),
}
// load all current images and retain layers
if err := is.restore(); err != nil {
return nil, err
}
return is, nil
}
func (is *store) restore() error {
err := is.fs.Walk(func(dgst digest.Digest) error {
img, err := is.Get(IDFromDigest(dgst))
if err != nil {
logrus.Errorf("invalid image %v, %v", dgst, err)
return nil
}
var l layer.Layer
if chainID := img.RootFS.ChainID(); chainID != "" {
l, err = is.ls.Get(chainID)
if err != nil {
return err
}
}
if err := is.digestSet.Add(dgst); err != nil {
return err
}
imageMeta := &imageMeta{
layer: l,
children: make(map[ID]struct{}),
}
is.images[IDFromDigest(dgst)] = imageMeta
return nil
})
if err != nil {
return err
}
// Second pass to fill in children maps
for id := range is.images {
if parent, err := is.GetParent(id); err == nil {
if parentMeta := is.images[parent]; parentMeta != nil {
parentMeta.children[id] = struct{}{}
}
}
}
return nil
}
func (is *store) Create(config []byte) (ID, error) {
var img Image
err := json.Unmarshal(config, &img)
if err != nil {
return "", err
}
// Must reject any config that references diffIDs from the history
// which aren't among the rootfs layers.
rootFSLayers := make(map[layer.DiffID]struct{})
for _, diffID := range img.RootFS.DiffIDs {
rootFSLayers[diffID] = struct{}{}
}
layerCounter := 0
for _, h := range img.History {
if !h.EmptyLayer {
layerCounter++
}
}
if layerCounter > len(img.RootFS.DiffIDs) {
return "", errors.New("too many non-empty layers in History section")
}
dgst, err := is.fs.Set(config)
if err != nil {
return "", err
}
imageID := IDFromDigest(dgst)
is.Lock()
defer is.Unlock()
if _, exists := is.images[imageID]; exists {
return imageID, nil
}
layerID := img.RootFS.ChainID()
var l layer.Layer
if layerID != "" {
l, err = is.ls.Get(layerID)
if err != nil {
return "", err
}
}
imageMeta := &imageMeta{
layer: l,
children: make(map[ID]struct{}),
}
is.images[imageID] = imageMeta
if err := is.digestSet.Add(imageID.Digest()); err != nil {
delete(is.images, imageID)
return "", err
}
return imageID, nil
}
func (is *store) Search(term string) (ID, error) {
is.Lock()
defer is.Unlock()
dgst, err := is.digestSet.Lookup(term)
if err != nil {
if err == digest.ErrDigestNotFound {
err = fmt.Errorf("No such image: %s", term)
}
return "", err
}
return IDFromDigest(dgst), nil
}
func (is *store) Get(id ID) (*Image, error) {
// todo: Check if image is in images
// todo: Detect manual insertions and start using them
config, err := is.fs.Get(id.Digest())
if err != nil {
return nil, err
}
img, err := NewFromJSON(config)
if err != nil {
return nil, err
}
img.computedID = id
img.Parent, err = is.GetParent(id)
if err != nil {
img.Parent = ""
}
return img, nil
}
func (is *store) Delete(id ID) ([]layer.Metadata, error) {
is.Lock()
defer is.Unlock()
imageMeta := is.images[id]
if imageMeta == nil {
return nil, fmt.Errorf("unrecognized image ID %s", id.String())
}
for id := range imageMeta.children {
is.fs.DeleteMetadata(id.Digest(), "parent")
}
if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
delete(is.images[parent].children, id)
}
if err := is.digestSet.Remove(id.Digest()); err != nil {
logrus.Errorf("error removing %s from digest set: %q", id, err)
}
delete(is.images, id)
is.fs.Delete(id.Digest())
if imageMeta.layer != nil {
return is.ls.Release(imageMeta.layer)
}
return nil, nil
}
func (is *store) SetParent(id, parent ID) error {
is.Lock()
defer is.Unlock()
parentMeta := is.images[parent]
if parentMeta == nil {
return fmt.Errorf("unknown parent image ID %s", parent.String())
}
if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
delete(is.images[parent].children, id)
}
parentMeta.children[id] = struct{}{}
return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent))
}
func (is *store) GetParent(id ID) (ID, error) {
d, err := is.fs.GetMetadata(id.Digest(), "parent")
if err != nil {
return "", err
}
return ID(d), nil // todo: validate?
}
func (is *store) Children(id ID) []ID {
is.Lock()
defer is.Unlock()
return is.children(id)
}
func (is *store) children(id ID) []ID {
var ids []ID
if is.images[id] != nil {
for id := range is.images[id].children {
ids = append(ids, id)
}
}
return ids
}
func (is *store) Heads() map[ID]*Image {
return is.imagesMap(false)
}
func (is *store) Map() map[ID]*Image {
return is.imagesMap(true)
}
func (is *store) imagesMap(all bool) map[ID]*Image {
is.Lock()
defer is.Unlock()
images := make(map[ID]*Image)
for id := range is.images {
if !all && len(is.children(id)) > 0 {
continue
}
img, err := is.Get(id)
if err != nil {
logrus.Errorf("invalid image access: %q, error: %q", id, err)
continue
}
images[id] = img
}
return images
}

View File

@@ -1,156 +0,0 @@
package v1
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
)
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// noFallbackMinVersion is the minimum version for which v1compatibility
// information will not be marshaled through the Image struct to remove
// blank fields.
var noFallbackMinVersion = "1.8.3"
// HistoryFromConfig creates a History struct from v1 configuration JSON
func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
h := image.History{}
var v1Image image.V1Image
if err := json.Unmarshal(imageJSON, &v1Image); err != nil {
return h, err
}
return image.History{
Author: v1Image.Author,
Created: v1Image.Created,
CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "),
Comment: v1Image.Comment,
EmptyLayer: emptyLayer,
}, nil
}
// CreateID creates an ID from v1 image, layerID and parent ID.
// Used for backwards compatibility with old clients.
func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
v1Image.ID = ""
v1JSON, err := json.Marshal(v1Image)
if err != nil {
return "", err
}
var config map[string]*json.RawMessage
if err := json.Unmarshal(v1JSON, &config); err != nil {
return "", err
}
// FIXME: note that this is slightly incompatible with RootFS logic
config["layer_id"] = rawJSON(layerID)
if parent != "" {
config["parent"] = rawJSON(parent)
}
configJSON, err := json.Marshal(config)
if err != nil {
return "", err
}
logrus.Debugf("CreateV1ID %s", configJSON)
return digest.FromBytes(configJSON), nil
}
// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) {
var dver struct {
DockerVersion string `json:"docker_version"`
}
if err := json.Unmarshal(imageJSON, &dver); err != nil {
return nil, err
}
useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)
if useFallback {
var v1Image image.V1Image
err := json.Unmarshal(imageJSON, &v1Image)
if err != nil {
return nil, err
}
imageJSON, err = json.Marshal(v1Image)
if err != nil {
return nil, err
}
}
var c map[string]*json.RawMessage
if err := json.Unmarshal(imageJSON, &c); err != nil {
return nil, err
}
delete(c, "id")
delete(c, "parent")
delete(c, "Size") // Size is calculated from data on disk and is inconsistent
delete(c, "parent_id")
delete(c, "layer_id")
delete(c, "throwaway")
c["rootfs"] = rawJSON(rootfs)
c["history"] = rawJSON(history)
return json.Marshal(c)
}
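
MakeConfigFromV1Config treats the config as a map[string]*json.RawMessage so it can drop v1-only keys and graft in rootfs and history without touching, or even understanding, any other field. A reduced sketch of that surgery — the input JSON and the grafted values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

// rawJSON mirrors the helper above: marshal a value into a
// *json.RawMessage so it can be grafted into a config map.
func rawJSON(v interface{}) *json.RawMessage {
	b, _ := json.Marshal(v)
	return (*json.RawMessage)(&b)
}

func main() {
	// An invented v1-style config with legacy keys.
	v1 := []byte(`{"id":"deadbeef","parent":"cafebabe","os":"linux","Size":42}`)

	var c map[string]*json.RawMessage
	json.Unmarshal(v1, &c)

	// Drop the v1-only keys, keep everything else untouched...
	for _, k := range []string{"id", "parent", "Size", "parent_id", "layer_id", "throwaway"} {
		delete(c, k)
	}
	// ...and graft in the modern sections.
	c["rootfs"] = rawJSON(map[string]interface{}{"type": "layers"})
	c["history"] = rawJSON([]map[string]string{{"created_by": "FROM scratch"}})

	out, _ := json.Marshal(c)
	fmt.Println(string(out)) // {"history":[...],"os":"linux","rootfs":{...}}
}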
// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct
func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in the old manifest
imageType := reflect.TypeOf(img).Elem()
for i := 0; i < imageType.NumField(); i++ {
f := imageType.Field(i)
jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
// Parent is handled specially below.
if jsonName != "" && jsonName != "parent" {
delete(configAsMap, jsonName)
}
}
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}

View File

@@ -1,56 +0,0 @@
package layer
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
)
// DigestSHA256EmptyTar is the canonical sha256 digest of an empty tar file
// (1024 NULL bytes)
const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef")
type emptyLayer struct{}
// EmptyLayer is a layer that corresponds to an empty tar archive.
var EmptyLayer = &emptyLayer{}
func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
buf := new(bytes.Buffer)
tarWriter := tar.NewWriter(buf)
tarWriter.Close()
return ioutil.NopCloser(buf), nil
}
func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) {
if p == "" {
return el.TarStream()
}
return nil, fmt.Errorf("can't get parent tar stream of an empty layer")
}
func (el *emptyLayer) ChainID() ChainID {
return ChainID(DigestSHA256EmptyTar)
}
func (el *emptyLayer) DiffID() DiffID {
return DigestSHA256EmptyTar
}
func (el *emptyLayer) Parent() Layer {
return nil
}
func (el *emptyLayer) Size() (size int64, err error) {
return 0, nil
}
func (el *emptyLayer) DiffSize() (size int64, err error) {
return 0, nil
}
func (el *emptyLayer) Metadata() (map[string]string, error) {
return make(map[string]string), nil
}
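
The DigestSHA256EmptyTar constant is not arbitrary: a tar archive with no entries is exactly two 512-byte zero blocks, so the digest is the sha256 of 1024 NULL bytes. A quick standalone check, built the same way TarStream builds its stream:

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	tar.NewWriter(buf).Close() // an empty archive: two 512-byte zero blocks

	fmt.Println(buf.Len())                                // 1024
	fmt.Printf("sha256:%x\n", sha256.Sum256(buf.Bytes())) // ...ace3c6ef, matching the constant
}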

View File

@@ -1,354 +0,0 @@
package layer
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
var (
stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
supportedAlgorithms = []digest.Algorithm{
digest.SHA256,
// digest.SHA384, // Currently not used
// digest.SHA512, // Currently not used
}
)
type fileMetadataStore struct {
root string
}
type fileMetadataTransaction struct {
store *fileMetadataStore
ws *ioutils.AtomicWriteSet
}
// NewFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
func NewFSMetadataStore(root string) (MetadataStore, error) {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
return &fileMetadataStore{
root: root,
}, nil
}
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
dgst := digest.Digest(layer)
return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
}
func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
return filepath.Join(fms.getLayerDirectory(layer), filename)
}
func (fms *fileMetadataStore) getMountDirectory(mount string) string {
return filepath.Join(fms.root, "mounts", mount)
}
func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
return filepath.Join(fms.getMountDirectory(mount), filename)
}
func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) {
tmpDir := filepath.Join(fms.root, "tmp")
if err := os.MkdirAll(tmpDir, 0755); err != nil {
return nil, err
}
ws, err := ioutils.NewAtomicWriteSet(tmpDir)
if err != nil {
return nil, err
}
return &fileMetadataTransaction{
store: fms,
ws: ws,
}, nil
}
func (fm *fileMetadataTransaction) SetSize(size int64) error {
content := fmt.Sprintf("%d", size)
return fm.ws.WriteFile("size", []byte(content), 0644)
}
func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644)
}
func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644)
}
func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644)
}
func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
jsonRef, err := json.Marshal(ref)
if err != nil {
return err
}
return fm.ws.WriteFile("descriptor.json", jsonRef, 0644)
}
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
var wc io.WriteCloser
if compressInput {
wc = gzip.NewWriter(f)
} else {
wc = f
}
return ioutils.NewWriteCloserWrapper(wc, func() error {
wc.Close()
return f.Close()
}), nil
}
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
finalDir := fm.store.getLayerDirectory(layer)
if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
return err
}
return fm.ws.Commit(finalDir)
}
func (fm *fileMetadataTransaction) Cancel() error {
return fm.ws.Cancel()
}
func (fm *fileMetadataTransaction) String() string {
return fm.ws.String()
}
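
A fileMetadataTransaction stages every metadata file in a temporary write set and publishes the whole set on Commit, so a crash mid-write never leaves a half-populated layer directory. A self-contained sketch of the underlying stage-then-rename idea — the real AtomicWriteSet lives in docker's ioutils package; txn here is an invented stand-in that keeps only the atomic rename step:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

type txn struct{ tmp string }

// start stages writes under a throwaway directory, like StartTransaction.
func start(root string) (*txn, error) {
	tmp, err := os.MkdirTemp(filepath.Join(root, "tmp"), "txn-")
	return &txn{tmp: tmp}, err
}

func (t *txn) writeFile(name string, data []byte) error {
	return os.WriteFile(filepath.Join(t.tmp, name), data, 0644)
}

// commit publishes the whole set with a single rename, which is atomic
// on the same filesystem.
func (t *txn) commit(final string) error {
	if err := os.MkdirAll(filepath.Dir(final), 0755); err != nil {
		return err
	}
	return os.Rename(t.tmp, final)
}

func (t *txn) cancel() error { return os.RemoveAll(t.tmp) }

func main() {
	root, _ := os.MkdirTemp("", "meta")
	defer os.RemoveAll(root)
	os.MkdirAll(filepath.Join(root, "tmp"), 0755)

	tx, _ := start(root)
	tx.writeFile("size", []byte("42"))
	tx.writeFile("diff", []byte("sha256:..."))
	if err := tx.commit(filepath.Join(root, "sha256", "abc")); err != nil {
		tx.cancel()
	}
	fmt.Println("committed")
}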
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size"))
if err != nil {
return 0, err
}
size, err := strconv.ParseInt(string(content), 10, 64)
if err != nil {
return 0, err
}
return size, nil
}
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return ChainID(dgst), nil
}
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff"))
if err != nil {
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return DiffID(dgst), nil
}
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid cache id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
if err != nil {
if os.IsNotExist(err) {
// only return an empty descriptor to represent what is stored
return distribution.Descriptor{}, nil
}
return distribution.Descriptor{}, err
}
var ref distribution.Descriptor
err = json.Unmarshal(content, &ref)
if err != nil {
return distribution.Descriptor{}, err
}
return ref, err
}
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
if err != nil {
return nil, err
}
f, err := gzip.NewReader(fz)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(f, func() error {
f.Close()
return fz.Close()
}), nil
}
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
}
func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
}
func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
}
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid mount id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid init id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return ChainID(dgst), nil
}
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
var ids []ChainID
for _, algorithm := range supportedAlgorithms {
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, nil, err
}
for _, fi := range fileInfos {
if fi.IsDir() && fi.Name() != "mounts" {
dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
} else {
ids = append(ids, ChainID(dgst))
}
}
}
}
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
if err != nil {
if os.IsNotExist(err) {
return ids, []string{}, nil
}
return nil, nil, err
}
var mounts []string
for _, fi := range fileInfos {
if fi.IsDir() {
mounts = append(mounts, fi.Name())
}
}
return ids, mounts, nil
}
func (fms *fileMetadataStore) Remove(layer ChainID) error {
return os.RemoveAll(fms.getLayerDirectory(layer))
}
func (fms *fileMetadataStore) RemoveMount(mount string) error {
return os.RemoveAll(fms.getMountDirectory(mount))
}

View File

@@ -1,282 +0,0 @@
// Package layer is a package for managing read-only
// and read-write mounts on the union file system
// driver. Read-only mounts are referenced using a
// content hash and are protected from mutation in
// the exposed interface. The tar format is used
// to create read-only layers and export both
// read-only and writable layers. The exported
// tar data for a read-only layer should match
// the tar used to create the layer.
package layer
import (
"errors"
"io"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/archive"
)
var (
// ErrLayerDoesNotExist is used when an operation is
// attempted on a layer which does not exist.
ErrLayerDoesNotExist = errors.New("layer does not exist")
// ErrLayerNotRetained is used when a release is
// attempted on a layer which is not retained.
ErrLayerNotRetained = errors.New("layer not retained")
// ErrMountDoesNotExist is used when an operation is
// attempted on a mount layer which does not exist.
ErrMountDoesNotExist = errors.New("mount does not exist")
// ErrMountNameConflict is used when a mount is attempted
// to be created but there is already a mount with the name
// used for creation.
ErrMountNameConflict = errors.New("mount already exists with name")
// ErrActiveMount is used when an operation on a
// mount is attempted but the layer is still
// mounted and the operation cannot be performed.
ErrActiveMount = errors.New("mount still active")
// ErrNotMounted is used when requesting an active
// mount but the layer is not mounted.
ErrNotMounted = errors.New("not mounted")
// ErrMaxDepthExceeded is used when a layer is attempted
// to be created which would result in a layer depth
// greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// on the current platform
ErrNotSupported = errors.New("not supported on this platform")
)
// ChainID is the content-addressable ID of a layer.
type ChainID digest.Digest
// String returns a string rendition of a layer ID
func (id ChainID) String() string {
return string(id)
}
// DiffID is the hash of an individual layer tar.
type DiffID digest.Digest
// String returns a string rendition of a layer DiffID
func (diffID DiffID) String() string {
return string(diffID)
}
// TarStreamer represents an object which may
// have its contents exported as a tar stream.
type TarStreamer interface {
// TarStream returns a tar archive stream
// for the contents of a layer.
TarStream() (io.ReadCloser, error)
}
// Layer represents a read-only layer
type Layer interface {
TarStreamer
// TarStreamFrom returns a tar archive stream for the whole layer chain, at
// arbitrary depth.
TarStreamFrom(ChainID) (io.ReadCloser, error)
// ChainID returns the content hash of the entire layer chain. The hash
// chain is made up of the DiffIDs of the top layer and all of its parents.
ChainID() ChainID
// DiffID returns the content hash of the layer
// tar stream used to create this layer.
DiffID() DiffID
// Parent returns the next layer in the layer chain.
Parent() Layer
// Size returns the size of the entire layer chain. The size
// is calculated from the total size of all files in the layers.
Size() (int64, error)
// DiffSize returns the size difference of the top layer
// from parent layer.
DiffSize() (int64, error)
// Metadata returns the low level storage metadata associated
// with layer.
Metadata() (map[string]string, error)
}
// RWLayer represents a layer which is
// readable and writable
type RWLayer interface {
TarStreamer
// Name returns the name of the mounted layer
Name() string
// Parent returns the layer which the writable
// layer was created from.
Parent() Layer
// Mount mounts the RWLayer and returns the filesystem path
// to the writable layer.
Mount(mountLabel string) (string, error)
// Unmount unmounts the RWLayer. This should be called
// for every mount. If there are multiple mount calls
// this operation will only decrement the internal mount counter.
Unmount() error
// Size represents the size of the writable layer
// as calculated by the total size of the files
// changed in the mutable layer.
Size() (int64, error)
// Changes returns the set of changes for the mutable layer
// from the base layer.
Changes() ([]archive.Change, error)
// Metadata returns the low level metadata for the mutable layer
Metadata() (map[string]string, error)
}
// Metadata holds information about a
// read-only layer
type Metadata struct {
// ChainID is the content hash of the layer
ChainID ChainID
// DiffID is the hash of the tar data used to
// create the layer
DiffID DiffID
// Size is the size of the layer and all parents
Size int64
// DiffSize is the size of the top layer
DiffSize int64
}
// MountInit is a function to initialize a
// writable mount. Changes made here will
// not be included in the Tar stream of the
// RWLayer.
type MountInit func(root string) error
// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
type CreateRWLayerOpts struct {
MountLabel string
InitFunc MountInit
StorageOpt map[string]string
}
// Store represents a backend for managing both
// read-only and read-write layers.
type Store interface {
Register(io.Reader, ChainID) (Layer, error)
Get(ChainID) (Layer, error)
Map() map[ChainID]Layer
Release(Layer) ([]Metadata, error)
CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error)
GetRWLayer(id string) (RWLayer, error)
GetMountID(id string) (string, error)
ReleaseRWLayer(RWLayer) ([]Metadata, error)
Cleanup() error
DriverStatus() [][2]string
DriverName() string
}
// DescribableStore represents a layer store capable of storing
// descriptors for layers.
type DescribableStore interface {
RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
}
// MetadataTransaction represents functions for setting layer metadata
// with a single transaction.
type MetadataTransaction interface {
SetSize(int64) error
SetParent(parent ChainID) error
SetDiffID(DiffID) error
SetCacheID(string) error
SetDescriptor(distribution.Descriptor) error
TarSplitWriter(compressInput bool) (io.WriteCloser, error)
Commit(ChainID) error
Cancel() error
String() string
}
// MetadataStore represents a backend for persisting
// metadata about layers and providing the metadata
// for restoring a Store.
type MetadataStore interface {
// StartTransaction starts an update for new metadata
// which will be used to represent an ID on commit.
StartTransaction() (MetadataTransaction, error)
GetSize(ChainID) (int64, error)
GetParent(ChainID) (ChainID, error)
GetDiffID(ChainID) (DiffID, error)
GetCacheID(ChainID) (string, error)
GetDescriptor(ChainID) (distribution.Descriptor, error)
TarSplitReader(ChainID) (io.ReadCloser, error)
SetMountID(string, string) error
SetInitID(string, string) error
SetMountParent(string, ChainID) error
GetMountID(string) (string, error)
GetInitID(string) (string, error)
GetMountParent(string) (ChainID, error)
// List returns the full list of referenced
// read-only and read-write layers
List() ([]ChainID, []string, error)
Remove(ChainID) error
RemoveMount(string) error
}
// CreateChainID returns the ChainID for a slice of layer DiffIDs
func CreateChainID(dgsts []DiffID) ChainID {
return createChainIDFromParent("", dgsts...)
}
func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
if len(dgsts) == 0 {
return parent
}
if parent == "" {
return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
}
// H(n) = SHA256(H(n-1) + " " + DiffID(n))
dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
}
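
Spelled out, the recurrence is ChainID([d1]) = d1 and ChainID([d1..dn]) = SHA256(ChainID([d1..dn-1]) + " " + dn), over the digest strings themselves. A standalone version of the same fold, with crypto/sha256 standing in for the vendored digest package and two real digests as sample input:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID folds DiffIDs left to right, mirroring createChainIDFromParent:
//   chain(1) = diff(1)
//   chain(n) = sha256(chain(n-1) + " " + diff(n))
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	chain := diffIDs[0]
	for _, d := range diffIDs[1:] {
		chain = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(chain+" "+d)))
	}
	return chain
}

func main() {
	diffs := []string{
		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", // empty tar
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // empty string
	}
	fmt.Println(chainID(diffs[:1])) // a single layer's ChainID is its own DiffID
	fmt.Println(chainID(diffs))     // deeper chains hash "parent-chain diffID"
}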
// ReleaseAndLog releases the provided layer from the given layer
// store, logging any error and the release metadata
func ReleaseAndLog(ls Store, l Layer) {
metadata, err := ls.Release(l)
if err != nil {
logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
}
LogReleaseMetadata(metadata)
}
// LogReleaseMetadata logs a metadata array; use it to
// ensure consistent logging of release metadata
func LogReleaseMetadata(metadatas []Metadata) {
for _, metadata := range metadatas {
logrus.Infof("Layer %s cleaned up", metadata.ChainID)
}
}

View File

@@ -1,695 +0,0 @@
package layer
import (
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/stringid"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125
type layerStore struct {
store MetadataStore
driver graphdriver.Driver
layerMap map[ChainID]*roLayer
layerL sync.Mutex
mounts map[string]*mountedLayer
mountL sync.Mutex
}
// StoreOptions are the options used to create a new Store instance
type StoreOptions struct {
StorePath string
MetadataStorePathTemplate string
GraphDriver string
GraphDriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
PluginGetter plugingetter.PluginGetter
}
// NewStoreFromOptions creates a new Store instance
func NewStoreFromOptions(options StoreOptions) (Store, error) {
driver, err := graphdriver.New(
options.StorePath,
options.GraphDriver,
options.GraphDriverOptions,
options.UIDMaps,
options.GIDMaps,
options.PluginGetter)
if err != nil {
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
}
logrus.Debugf("Using graph driver %s", driver)
fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
if err != nil {
return nil, err
}
return NewStoreFromGraphDriver(fms, driver)
}
// NewStoreFromGraphDriver creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore
// the Store.
func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) {
ls := &layerStore{
store: store,
driver: driver,
layerMap: map[ChainID]*roLayer{},
mounts: map[string]*mountedLayer{},
}
ids, mounts, err := store.List()
if err != nil {
return nil, err
}
for _, id := range ids {
l, err := ls.loadLayer(id)
if err != nil {
logrus.Debugf("Failed to load layer %s: %s", id, err)
continue
}
if l.parent != nil {
l.parent.referenceCount++
}
}
for _, mount := range mounts {
if err := ls.loadMount(mount); err != nil {
logrus.Debugf("Failed to load mount %s: %s", mount, err)
}
}
return ls, nil
}
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
cl, ok := ls.layerMap[layer]
if ok {
return cl, nil
}
diff, err := ls.store.GetDiffID(layer)
if err != nil {
return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
}
size, err := ls.store.GetSize(layer)
if err != nil {
return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
}
cacheID, err := ls.store.GetCacheID(layer)
if err != nil {
return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
}
parent, err := ls.store.GetParent(layer)
if err != nil {
return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
}
descriptor, err := ls.store.GetDescriptor(layer)
if err != nil {
return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
}
cl = &roLayer{
chainID: layer,
diffID: diff,
size: size,
cacheID: cacheID,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if parent != "" {
p, err := ls.loadLayer(parent)
if err != nil {
return nil, err
}
cl.parent = p
}
ls.layerMap[cl.chainID] = cl
return cl, nil
}
func (ls *layerStore) loadMount(mount string) error {
if _, ok := ls.mounts[mount]; ok {
return nil
}
mountID, err := ls.store.GetMountID(mount)
if err != nil {
return err
}
initID, err := ls.store.GetInitID(mount)
if err != nil {
return err
}
parent, err := ls.store.GetMountParent(mount)
if err != nil {
return err
}
ml := &mountedLayer{
name: mount,
mountID: mountID,
initID: initID,
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
if parent != "" {
p, err := ls.loadLayer(parent)
if err != nil {
return err
}
ml.parent = p
p.referenceCount++
}
ls.mounts[ml.name] = ml
return nil
}
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
digester := digest.Canonical.New()
tr := io.TeeReader(ts, digester.Hash())
tsw, err := tx.TarSplitWriter(true)
if err != nil {
return err
}
metaPacker := storage.NewJSONPacker(tsw)
defer tsw.Close()
// we're passing nil here for the file putter, because ApplyDiff will
// handle the extraction of the archive
rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
if err != nil {
return err
}
applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
if err != nil {
return err
}
// Discard trailing data but ensure metadata is picked up to reconstruct stream
io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed
layer.size = applySize
layer.diffID = DiffID(digester.Digest())
logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)
return nil
}
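
applyTar computes the DiffID while the archive is being extracted: the TeeReader copies every byte the driver consumes into a digester, so the tar is never read twice. The shape of that trick in isolation — io.Discard plays the driver's role here, and the input is obviously not a real tar stream:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Any reader works for the shape; this is stand-in data.
	ts := strings.NewReader("pretend this is a tar stream")

	// Everything the consumer reads from tr is also written into the
	// digester, mirroring: tr := io.TeeReader(ts, digester.Hash())
	digester := sha256.New()
	tr := io.TeeReader(ts, digester)

	// The "driver" consumes the stream exactly once (ApplyDiff in the
	// real code); io.Discard is its stand-in.
	applied, _ := io.Copy(io.Discard, tr)

	fmt.Printf("applied %d bytes, diffID sha256:%x\n", applied, digester.Sum(nil))
}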
func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
}
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
// err is used to hold the error which will always trigger
// cleanup of created sources but may not be an error returned
// to the caller (already exists).
var err error
var pid string
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
pid = p.cacheID
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
if p.depth() >= maxLayerDepth {
err = ErrMaxDepthExceeded
return nil, err
}
}
// Create new roLayer
layer := &roLayer{
parent: p,
cacheID: stringid.GenerateRandomID(),
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil {
return nil, err
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
if err := ls.driver.Remove(layer.cacheID); err != nil {
logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
}
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
if err = ls.applyTar(tx, ts, pid, layer); err != nil {
return nil, err
}
if layer.parent == nil {
layer.chainID = ChainID(layer.diffID)
} else {
layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return the error
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer {
l, ok := ls.layerMap[layer]
if !ok {
return nil
}
l.referenceCount++
return l
}
func (ls *layerStore) get(l ChainID) *roLayer {
ls.layerL.Lock()
defer ls.layerL.Unlock()
return ls.getWithoutLock(l)
}
func (ls *layerStore) Get(l ChainID) (Layer, error) {
ls.layerL.Lock()
defer ls.layerL.Unlock()
layer := ls.getWithoutLock(l)
if layer == nil {
return nil, ErrLayerDoesNotExist
}
return layer.getReference(), nil
}
func (ls *layerStore) Map() map[ChainID]Layer {
ls.layerL.Lock()
defer ls.layerL.Unlock()
layers := map[ChainID]Layer{}
for k, v := range ls.layerMap {
layers[k] = v
}
return layers
}
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
err := ls.driver.Remove(layer.cacheID)
if err != nil {
return err
}
err = ls.store.Remove(layer.chainID)
if err != nil {
return err
}
metadata.DiffID = layer.diffID
metadata.ChainID = layer.chainID
metadata.Size, err = layer.Size()
if err != nil {
return err
}
metadata.DiffSize = layer.size
return nil
}
func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) {
depth := 0
removed := []Metadata{}
for {
if l.referenceCount == 0 {
panic("layer not retained")
}
l.referenceCount--
if l.referenceCount != 0 {
return removed, nil
}
if len(removed) == 0 && depth > 0 {
panic("cannot remove layer with child")
}
if l.hasReferences() {
panic("cannot delete referenced layer")
}
var metadata Metadata
if err := ls.deleteLayer(l, &metadata); err != nil {
return nil, err
}
delete(ls.layerMap, l.chainID)
removed = append(removed, metadata)
if l.parent == nil {
return removed, nil
}
depth++
l = l.parent
}
}
func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
ls.layerL.Lock()
defer ls.layerL.Unlock()
layer, ok := ls.layerMap[l.ChainID()]
if !ok {
return []Metadata{}, nil
}
if !layer.hasReference(l) {
return nil, ErrLayerNotRetained
}
layer.deleteReference(l)
return ls.releaseLayer(layer)
}
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) {
var (
storageOpt map[string]string
initFunc MountInit
mountLabel string
)
if opts != nil {
mountLabel = opts.MountLabel
storageOpt = opts.StorageOpt
initFunc = opts.InitFunc
}
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[name]
if ok {
return nil, ErrMountNameConflict
}
var err error
var pid string
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
pid = p.cacheID
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
m = &mountedLayer{
name: name,
parent: p,
mountID: ls.mountID(name),
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
if err != nil {
return nil, err
}
m.initID = pid
}
createOpts := &graphdriver.CreateOpts{
StorageOpt: storageOpt,
}
if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
return nil, err
}
if err = ls.saveMount(m); err != nil {
return nil, err
}
return m.getReference(), nil
}
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
mount, ok := ls.mounts[id]
if !ok {
return nil, ErrMountDoesNotExist
}
return mount.getReference(), nil
}
func (ls *layerStore) GetMountID(id string) (string, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
mount, ok := ls.mounts[id]
if !ok {
return "", ErrMountDoesNotExist
}
logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
return mount.mountID, nil
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[l.Name()]
if !ok {
return []Metadata{}, nil
}
if err := m.deleteReference(l); err != nil {
return nil, err
}
if m.hasReferences() {
return []Metadata{}, nil
}
if err := ls.driver.Remove(m.mountID); err != nil {
logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
if m.initID != "" {
if err := ls.driver.Remove(m.initID); err != nil {
logrus.Errorf("Error removing init layer %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
}
if err := ls.store.RemoveMount(m.name); err != nil {
logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
delete(ls.mounts, m.Name())
ls.layerL.Lock()
defer ls.layerL.Unlock()
if m.parent != nil {
return ls.releaseLayer(m.parent)
}
return []Metadata{}, nil
}
func (ls *layerStore) saveMount(mount *mountedLayer) error {
if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
return err
}
if mount.initID != "" {
if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
return err
}
}
if mount.parent != nil {
if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
return err
}
}
ls.mounts[mount.name] = mount
return nil
}
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers
// which are expecting this layer with this special name. If all
// graph drivers can be updated to not rely on knowing about this layer
// then the initID should be randomly generated.
initID := fmt.Sprintf("%s-init", graphID)
createOpts := &graphdriver.CreateOpts{
MountLabel: mountLabel,
StorageOpt: storageOpt,
}
if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
return "", err
}
p, err := ls.driver.Get(initID, "")
if err != nil {
return "", err
}
if err := initFunc(p); err != nil {
ls.driver.Put(initID)
return "", err
}
if err := ls.driver.Put(initID); err != nil {
return "", err
}
return initID, nil
}
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
if !ok {
diffDriver = &naiveDiffPathDriver{ls.driver}
}
defer metadata.Close()
// get a file getter rooted at this layer's content on disk
fileGetCloser, err := diffDriver.DiffGetter(graphID)
if err != nil {
return err
}
defer fileGetCloser.Close()
metaUnpacker := storage.NewJSONUnpacker(metadata)
unpackerCounter := &unpackSizeCounter{metaUnpacker, size}
logrus.Debugf("Assembling tar data for %s", graphID)
return asm.WriteOutputTarStream(fileGetCloser, unpackerCounter, w)
}
func (ls *layerStore) Cleanup() error {
return ls.driver.Cleanup()
}
func (ls *layerStore) DriverStatus() [][2]string {
return ls.driver.Status()
}
func (ls *layerStore) DriverName() string {
return ls.driver.String()
}
type naiveDiffPathDriver struct {
graphdriver.Driver
}
type fileGetPutter struct {
storage.FileGetter
driver graphdriver.Driver
id string
}
func (w *fileGetPutter) Close() error {
return w.driver.Put(w.id)
}
func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p, err := n.Driver.Get(id, "")
if err != nil {
return nil, err
}
return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
}

View File

@@ -1,11 +0,0 @@
package layer
import (
"io"
"github.com/docker/distribution"
)
func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, descriptor)
}

View File

@@ -1,9 +0,0 @@
// +build linux freebsd darwin openbsd solaris
package layer
import "github.com/docker/docker/pkg/stringid"
func (ls *layerStore) mountID(name string) string {
return stringid.GenerateRandomID()
}

View File

@@ -1,98 +0,0 @@
package layer
import (
"errors"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/daemon/graphdriver"
)
// GetLayerPath returns the path to a layer
func GetLayerPath(s Store, layer ChainID) (string, error) {
ls, ok := s.(*layerStore)
if !ok {
return "", errors.New("unsupported layer store")
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
rl, ok := ls.layerMap[layer]
if !ok {
return "", ErrLayerDoesNotExist
}
path, err := ls.driver.Get(rl.cacheID, "")
if err != nil {
return "", err
}
if err := ls.driver.Put(rl.cacheID); err != nil {
return "", err
}
return path, nil
}
func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
var err error // this is used for cleanup in existingLayer case
diffID := digest.FromBytes([]byte(graphID))
// Create new roLayer
layer := &roLayer{
cacheID: graphID,
diffID: DiffID(diffID),
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
size: size,
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
layer.chainID = createChainIDFromParent("", layer.diffID)
if !ls.driver.Exists(layer.cacheID) {
return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID)
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
func (ls *layerStore) mountID(name string) string {
// Windows has issues if the container ID doesn't match the mount ID
return name
}
func (ls *layerStore) GraphDriver() graphdriver.Driver {
return ls.driver
}

View File

@@ -1,256 +0,0 @@
package layer
import (
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// CreateRWLayerByGraphID creates an RWLayer in the layer store using
// the provided name with the given graphID. After migration, the
// RWLayer may be retrieved by the given name.
func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[name]
if ok {
if m.parent.chainID != parent {
return errors.New("name conflict, mismatched parent")
}
if m.mountID != graphID {
return errors.New("mount already exists")
}
return nil
}
if !ls.driver.Exists(graphID) {
return fmt.Errorf("graph ID does not exist: %q", graphID)
}
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return ErrLayerDoesNotExist
}
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
// TODO: Ensure graphID has correct parent
m = &mountedLayer{
name: name,
parent: p,
mountID: graphID,
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
// Check for existing init layer
initID := fmt.Sprintf("%s-init", graphID)
if ls.driver.Exists(initID) {
m.initID = initID
}
if err = ls.saveMount(m); err != nil {
return err
}
return nil
}
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
defer func() {
if err != nil {
logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
}
}()
if oldTarDataPath == "" {
err = errors.New("no tar-split file")
return
}
tarDataFile, err := os.Open(oldTarDataPath)
if err != nil {
return
}
defer tarDataFile.Close()
uncompressed, err := gzip.NewReader(tarDataFile)
if err != nil {
return
}
dgst := digest.Canonical.New()
err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
if err != nil {
return
}
diffID = DiffID(dgst.Digest())
err = os.RemoveAll(newTarDataPath)
if err != nil {
return
}
err = os.Link(oldTarDataPath, newTarDataPath)
return
}
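
ChecksumForGraphID leans on named return values plus a deferred fallback: if the tar-split path fails for any reason, the defer overwrites diffID, size, and err with the no-tar-split recomputation. The control-flow idiom on its own, with fast and slow as invented stand-ins for the two strategies:

package main

import (
	"errors"
	"fmt"
)

// fast and slow are invented stand-ins for the tar-split path and the
// checksumForGraphIDNoTarsplit fallback.
func fast() (string, error) { return "", errors.New("no tar-split file") }
func slow() (string, error) { return "sha256:recomputed", nil }

// checksum mirrors the named-return + defer pattern above: any failure
// in the fast path transparently falls back to the slow one.
func checksum() (diffID string, err error) {
	defer func() {
		if err != nil {
			diffID, err = slow()
		}
	}()
	diffID, err = fast()
	return
}

func main() {
	id, err := checksum()
	fmt.Println(id, err) // sha256:recomputed <nil>
}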
func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
rawarchive, err := ls.driver.Diff(id, parent)
if err != nil {
return
}
defer rawarchive.Close()
f, err := os.Create(newTarDataPath)
if err != nil {
return
}
defer f.Close()
mfz := gzip.NewWriter(f)
defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz)
packerCounter := &packSizeCounter{metaPacker, &size}
archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
if err != nil {
return
}
dgst, err := digest.FromReader(archive)
if err != nil {
return
}
diffID = DiffID(dgst)
return
}
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
// err is used to hold the error which will always trigger
// cleanup of created sources but may not be an error returned
// to the caller (already exists).
var err error
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
// Create new roLayer
layer := &roLayer{
parent: p,
cacheID: graphID,
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
diffID: diffID,
size: size,
chainID: createChainIDFromParent(parent, diffID),
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
tsw, err := tx.TarSplitWriter(false)
if err != nil {
return nil, err
}
defer tsw.Close()
tdf, err := os.Open(tarDataFile)
if err != nil {
return nil, err
}
defer tdf.Close()
_, err = io.Copy(tsw, tdf)
if err != nil {
return nil, err
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
type unpackSizeCounter struct {
unpacker storage.Unpacker
size *int64
}
func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
e, err := u.unpacker.Next()
if err == nil && u.size != nil {
*u.size += e.Size
}
return e, err
}
type packSizeCounter struct {
packer storage.Packer
size *int64
}
func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
n, err := p.packer.AddEntry(e)
if err == nil && p.size != nil {
*p.size += e.Size
}
return n, err
}
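The two counters above wrap tar-split's Packer and Unpacker purely to accumulate entry sizes while a stream is assembled or disassembled. Below is a minimal, self-contained sketch of the same pattern against the vendored github.com/vbatts/tar-split package; sizeCountingPacker is a hypothetical stand-in for packSizeCounter, and the entry names and sizes are fabricated for illustration.
package main
import (
	"bytes"
	"compress/gzip"
	"fmt"
	"github.com/vbatts/tar-split/tar/storage"
)
// sizeCountingPacker mirrors packSizeCounter above: it forwards entries to
// the wrapped Packer and accumulates their Size fields into *size.
type sizeCountingPacker struct {
	packer storage.Packer
	size   *int64
}
func (p *sizeCountingPacker) AddEntry(e storage.Entry) (int, error) {
	n, err := p.packer.AddEntry(e)
	if err == nil && p.size != nil {
		*p.size += e.Size
	}
	return n, err
}
func main() {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	var size int64
	counter := &sizeCountingPacker{storage.NewJSONPacker(gz), &size}
	// Two fabricated entries; their Size fields accumulate into size.
	counter.AddEntry(storage.Entry{Type: storage.FileType, Name: "etc/hostname", Size: 12})
	counter.AddEntry(storage.Entry{Type: storage.FileType, Name: "etc/hosts", Size: 34})
	gz.Close()
	fmt.Println("layer size recorded:", size) // 46
}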

View File

@ -1,99 +0,0 @@
package layer
import (
"io"
"github.com/docker/docker/pkg/archive"
)
type mountedLayer struct {
name string
mountID string
initID string
parent *roLayer
path string
layerStore *layerStore
references map[RWLayer]*referencedRWLayer
}
func (ml *mountedLayer) cacheParent() string {
if ml.initID != "" {
return ml.initID
}
if ml.parent != nil {
return ml.parent.cacheID
}
return ""
}
func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
}
func (ml *mountedLayer) Name() string {
return ml.name
}
func (ml *mountedLayer) Parent() Layer {
if ml.parent != nil {
return ml.parent
}
// Return a nil interface instead of an interface wrapping a nil
// pointer.
return nil
}
func (ml *mountedLayer) Size() (int64, error) {
return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
}
func (ml *mountedLayer) Changes() ([]archive.Change, error) {
return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent())
}
func (ml *mountedLayer) Metadata() (map[string]string, error) {
return ml.layerStore.driver.GetMetadata(ml.mountID)
}
func (ml *mountedLayer) getReference() RWLayer {
ref := &referencedRWLayer{
mountedLayer: ml,
}
ml.references[ref] = ref
return ref
}
func (ml *mountedLayer) hasReferences() bool {
return len(ml.references) > 0
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
if _, ok := ml.references[ref]; !ok {
return ErrLayerNotRetained
}
delete(ml.references, ref)
return nil
}
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
ml.references[ref] = ref
}
}
type referencedRWLayer struct {
*mountedLayer
}
func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
}
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
}
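A hedged sketch of the Mount/Unmount contract documented above: exactly one Unmount per successful Mount. withMountedLayer is a hypothetical helper, not part of this package; it assumes the docker/layer import path as vendored here.
package layerutil
import "github.com/docker/docker/layer"
// withMountedLayer mounts an RWLayer, runs fn against the mounted path, and
// always issues the single matching Unmount, even if fn fails.
func withMountedLayer(rw layer.RWLayer, mountLabel string, fn func(path string) error) error {
	path, err := rw.Mount(mountLabel)
	if err != nil {
		return err
	}
	// Unmount's own error is intentionally dropped in this sketch.
	defer rw.Unmount()
	return fn(path)
}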

View File

@ -1,192 +0,0 @@
package layer
import (
"fmt"
"io"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
)
type roLayer struct {
chainID ChainID
diffID DiffID
parent *roLayer
cacheID string
size int64
layerStore *layerStore
descriptor distribution.Descriptor
referenceCount int
references map[Layer]struct{}
}
// TarStream for roLayer guarantees that the data that is produced is the exact
// data that the layer was registered with.
func (rl *roLayer) TarStream() (io.ReadCloser, error) {
r, err := rl.layerStore.store.TarSplitReader(rl.chainID)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw)
if err != nil {
pw.CloseWithError(err)
} else {
pw.Close()
}
}()
rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
if err != nil {
return nil, err
}
return rc, nil
}
// TarStreamFrom does not make any guarantees as to the correctness of the produced
// data. As such it should not be used when the layer content must be verified
// to be an exact match to the registered layer.
func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
var parentCacheID string
for pl := rl.parent; pl != nil; pl = pl.parent {
if pl.chainID == parent {
parentCacheID = pl.cacheID
break
}
}
if parent != ChainID("") && parentCacheID == "" {
return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
}
return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID)
}
func (rl *roLayer) ChainID() ChainID {
return rl.chainID
}
func (rl *roLayer) DiffID() DiffID {
return rl.diffID
}
func (rl *roLayer) Parent() Layer {
if rl.parent == nil {
return nil
}
return rl.parent
}
func (rl *roLayer) Size() (size int64, err error) {
if rl.parent != nil {
size, err = rl.parent.Size()
if err != nil {
return
}
}
return size + rl.size, nil
}
func (rl *roLayer) DiffSize() (size int64, err error) {
return rl.size, nil
}
func (rl *roLayer) Metadata() (map[string]string, error) {
return rl.layerStore.driver.GetMetadata(rl.cacheID)
}
type referencedCacheLayer struct {
*roLayer
}
func (rl *roLayer) getReference() Layer {
ref := &referencedCacheLayer{
roLayer: rl,
}
rl.references[ref] = struct{}{}
return ref
}
func (rl *roLayer) hasReference(ref Layer) bool {
_, ok := rl.references[ref]
return ok
}
func (rl *roLayer) hasReferences() bool {
return len(rl.references) > 0
}
func (rl *roLayer) deleteReference(ref Layer) {
delete(rl.references, ref)
}
func (rl *roLayer) depth() int {
if rl.parent == nil {
return 1
}
return rl.parent.depth() + 1
}
func storeLayer(tx MetadataTransaction, layer *roLayer) error {
if err := tx.SetDiffID(layer.diffID); err != nil {
return err
}
if err := tx.SetSize(layer.size); err != nil {
return err
}
if err := tx.SetCacheID(layer.cacheID); err != nil {
return err
}
// Do not store empty descriptors
if layer.descriptor.Digest != "" {
if err := tx.SetDescriptor(layer.descriptor); err != nil {
return err
}
}
if layer.parent != nil {
if err := tx.SetParent(layer.parent.chainID); err != nil {
return err
}
}
return nil
}
func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
verifier, err := digest.NewDigestVerifier(dgst)
if err != nil {
return nil, err
}
return &verifiedReadCloser{
rc: rc,
dgst: dgst,
verifier: verifier,
}, nil
}
type verifiedReadCloser struct {
rc io.ReadCloser
dgst digest.Digest
verifier digest.Verifier
}
func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
n, err = vrc.rc.Read(p)
if n > 0 {
if n, err := vrc.verifier.Write(p[:n]); err != nil {
return n, err
}
}
if err == io.EOF {
if !vrc.verifier.Verified() {
err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
}
}
return
}
func (vrc *verifiedReadCloser) Close() error {
return vrc.rc.Close()
}
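The verifiedReadCloser above checksums bytes as they stream past and only reports success at EOF. Here is a minimal, self-contained sketch of the same verify-while-reading pattern, using the vendored docker/distribution digest package directly; the payload string is fabricated for illustration.
package main
import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"github.com/docker/distribution/digest"
)
func main() {
	payload := "layer diff bytes"
	dgst := digest.FromBytes([]byte(payload)) // the digest we expect to see
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}
	// Tee every byte a consumer reads into the verifier, mirroring what
	// verifiedReadCloser.Read does above.
	r := io.TeeReader(strings.NewReader(payload), verifier)
	if _, err := ioutil.ReadAll(r); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true only if bytes matched
}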

View File

@ -1,9 +0,0 @@
package layer
import "github.com/docker/distribution"
var _ distribution.Describable = &roLayer{}
func (rl *roLayer) Descriptor() distribution.Descriptor {
return rl.descriptor
}

View File

@ -1 +0,0 @@
This code provides helper functions for dealing with archive files.

File diff suppressed because it is too large

View File

@ -1,95 +0,0 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/system"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
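For orientation, a small, self-contained sketch of the AUFS naming that ConvertWrite/ConvertRead translate to and from; the ".wh." prefix and ".wh..wh..opq" marker are assumed to match this package's WhiteoutPrefix and WhiteoutOpaqueDir constants, and the paths are fabricated.
package main
import (
	"fmt"
	"path/filepath"
)
func main() {
	// A file deleted in an overlay layer appears as a 0:0 char device at its
	// old path; in the AUFS tar format it becomes an empty regular file with
	// a ".wh." prefix on its basename.
	dir, name := filepath.Split("etc/hostname")
	fmt.Println(filepath.Join(dir, ".wh."+name)) // etc/.wh.hostname
	// An opaque directory is marked by an empty file placed inside it.
	fmt.Println(filepath.Join("etc", ".wh..wh..opq")) // etc/.wh..wh..opq
}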

View File

@ -1,7 +0,0 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}

View File

@ -1,118 +0,0 @@
// +build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return srcPath + string(filepath.Separator) + include
}
// CanonicalTarNameForPath converts a platform-specific filepath
// to a canonical POSIX-style path for tar archival. p is a
// relative path.
func CanonicalTarNameForPath(p string) (string, error) {
return p, nil // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
}
inode = uint64(s.Ino)
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
}
return
}
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
}
return int(s.Uid), int(s.Gid), nil
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
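A quick worked example of the split above; 0x0801 is the rdev a Linux system typically reports for /dev/sda1 (major 8, minor 1). The arithmetic is re-implemented here so the snippet stands alone.
package main
import "fmt"
func main() {
	rdev := uint64(0x0801) // e.g. /dev/sda1 on Linux
	major := (rdev >> 8) & 0xfff
	minor := (rdev & 0xff) | ((rdev >> 12) & 0xfff00)
	fmt.Println(major, minor) // 8 1
}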
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
if rsystem.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}

View File

@ -1,70 +0,0 @@
// +build windows
package archive
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath converts a platform-specific filepath
// to a canonical POSIX-style path for tar archival. p is a
// relative path.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows style relative path with backslashes
// into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace; however, we must
// check just in case
if strings.Contains(p, "/") {
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
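A worked example of the transform above, re-implemented so it stands alone: the 0755 mask leaves 0644 unchanged, then the execute bits force it to 0755.
package main
import (
	"fmt"
	"os"
)
func main() {
	perm := os.FileMode(0644)
	perm &= 0755 // still 0644: the mask only strips group/other write
	perm |= 0111 // add execute bits everywhere
	fmt.Printf("%o\n", perm) // 755
}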
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (int, int, error) {
// no notion of file ownership mapping yet on Windows
return 0, 0, nil
}

View File

@ -1,446 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// GNU tar and the Go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files. We handle this by treating times as equal if they match
// exactly, *or* if they have the same second count and either a
// or b has exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: uidMaps,
GIDMaps: gidMaps,
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}
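A hedged end-to-end sketch of the two exported entry points above: compute the diff of an upper directory against one lower layer, then stream it as a tar with AUFS-style whiteouts. The layer paths are hypothetical, and nil ID maps mean no UID/GID remapping.
package main
import (
	"io"
	"log"
	"os"
	"github.com/docker/docker/pkg/archive"
)
func main() {
	changes, err := archive.Changes([]string{"/var/lib/layers/lower"}, "/var/lib/layers/upper")
	if err != nil {
		log.Fatal(err)
	}
	rc, err := archive.ExportChanges("/var/lib/layers/upper", changes, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil { // the layer tar
		log.Fatal(err)
	}
}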

View File

@ -1,312 +0,0 @@
package archive
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save whole seconds on
// large images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// for generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
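A worked example of the NUL scan above: getdents(2) hands back names as fixed-size, NUL-padded byte arrays, and clen recovers the usable length, like C's strlen. The function is repeated here so the snippet stands alone.
package main
import "fmt"
func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}
func main() {
	fmt.Println(clen([]byte{'s', 'd', 'a', 0, 0, 0})) // 3
}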
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}

View File

@ -1,97 +0,0 @@
// +build !linux
package archive
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}

View File

@ -1,36 +0,0 @@
// +build !windows
package archive
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

View File

@ -1,30 +0,0 @@
package archive
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.ModTime() != newStat.ModTime() ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

View File

@ -1,458 +0,0 @@
package archive
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/system"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
// Ensure paths are in platform semantics
cleanedPath = normalizePath(cleanedPath)
originalPath = normalizePath(originalPath)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(normalizePath(path))
if specifiesCurrentDir(path) {
cleanedPath += string(filepath.Separator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
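A few worked examples of SplitPathDirEntry on Unix paths, showing how a trailing "." survives cleaning while a bare trailing separator does not:
package main
import (
	"fmt"
	"github.com/docker/docker/pkg/archive"
)
func main() {
	fmt.Println(archive.SplitPathDirEntry("/foo/bar/.")) // /foo/bar .
	fmt.Println(archive.SplitPathDirEntry("/foo/bar/"))  // /foo bar
	fmt.Println(archive.SplitPathDirEntry("/foo/bar"))   // /foo bar
}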
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
filter := []string{sourceBase}
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
})
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symlink;
// we will use the target file instead of the symlink if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Lstat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
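A self-contained sketch of RebaseArchiveEntries: build a one-entry tar in memory whose first path element is "old", rebase it to "new", and read the entry names back. The entry name and body are fabricated for illustration.
package main
import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"github.com/docker/docker/pkg/archive"
)
func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "old/a.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(body); err != nil {
		log.Fatal(err)
	}
	tw.Close()
	rebased := archive.RebaseArchiveEntries(&buf, "old", "new")
	defer rebased.Close()
	tr := tar.NewReader(rebased)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name) // new/a.txt
	}
}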
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
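A minimal usage sketch with hypothetical host paths; per the contract above, the source must exist and the destination's parent directory must exist. The trailing separator on the destination asserts that it is a directory.
package main
import (
	"log"
	"github.com/docker/docker/pkg/archive"
)
func main() {
	// Copy one file into an existing directory; followLink=false keeps
	// a symlink source as a symlink.
	if err := archive.CopyResource("/tmp/src/config.json", "/tmp/dst/", false); err != nil {
		log.Fatal(err)
	}
}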
// CopyTo extracts the given content, whose entries are
// described by srcInfo, to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides which real path needs to be copied, based on
// whether symlinks should be followed. If followLink is true, resolvedPath is
// the link target of any symlink file; otherwise only symlinks in the directory
// portion are resolved, and a symlink file itself is returned unresolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not following the symlink, resolve the symlink of the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, and
// returns the completed resolved path and the rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separator or ".") so
// we can manually re-append them where the original path had them
var rebaseName string
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}

View File

@ -1,11 +0,0 @@
// +build !windows
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

View File

@ -1,9 +0,0 @@
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

View File

@ -1,279 +0,0 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return 0, err
}
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note as these operations are platform specific, so must the slash be.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// These operations are platform-specific, so the path separator must be too.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If the path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
// if the options contain a uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if srcHdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
if err != nil {
return 0, err
}
srcHdr.Uid = xUID
}
if srcHdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
if err != nil {
return 0, err
}
srcHdr.Gid = xGID
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to prevent further
// file creation inside them from modifying the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// applyLayerHandler does the bulk of ApplyLayer's work, but allows for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
if decompress {
layer, err = DecompressStream(layer)
if err != nil {
return 0, err
}
}
return UnpackLayer(dest, layer, options)
}
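
A minimal usage sketch for the exported entry points above (not part of the
original vendored file; assumes a layer tarball named layer.tar exists):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Open a (possibly compressed) layer diff; ApplyLayer autodetects the
	// compression and honors whiteout files while unpacking into ./rootfs.
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	size, err := archive.ApplyLayer("rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied %d bytes\n", size)
}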

View File

@ -1,97 +0,0 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes\n", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View File

@ -1,16 +0,0 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}

View File

@ -1,16 +0,0 @@
// +build !linux
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View File

@ -1,23 +0,0 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
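
A worked example of the naming convention above (illustrative sketch, not
part of the original vendored file):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A file deleted in an upper layer is recorded as dir/.wh.<basename>
	// (WhiteoutPrefix + basename).
	deleted := "/etc/passwd"
	wh := filepath.Join(filepath.Dir(deleted), ".wh."+filepath.Base(deleted))
	fmt.Println(wh) // /etc/.wh.passwd

	// A directory made opaque carries a ".wh..wh..opq" marker entry
	// (WhiteoutOpaqueDir) inside it.
	fmt.Println(filepath.Join("/var/lib", ".wh..wh..opq"))
}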

View File

@ -1,59 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
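
A short usage sketch for Generate (not part of the original vendored file),
matching the doc comment's example:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// One complete pair plus a trailing name: foo.txt gets content,
	// emptyfile is created empty.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	// Stream the tar to stdout; pipe through `tar -tv` to inspect it.
	if _, err := io.Copy(os.Stdout, rdr); err != nil {
		log.Fatal(err)
	}
}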

View File

@ -1,97 +0,0 @@
package chrootarchive
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
)
var chrootArchiver = &archive.Archiver{Untar: Untar}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, false)
}
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
if options == nil {
options = &archive.TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return err
}
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil {
return err
}
}
r := ioutil.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
r = decompressedArchive
}
return invokeUnpack(r, dest, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
return chrootArchiver.TarUntar(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
return chrootArchiver.CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
// destination path will be `dst/base(src)` or `dst\base(src)`
func CopyFileWithTar(src, dst string) (err error) {
return chrootArchiver.CopyFileWithTar(src, dst)
}
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
return chrootArchiver.UntarPath(src, dst)
}
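
A hedged usage sketch for the chrooted Untar (not part of the original
vendored file; assumes an image.tar.gz in the working directory):

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// The chrooted helpers re-exec the current binary, so main must let
	// reexec dispatch to the registered "docker-untar" entry point first.
	if reexec.Init() {
		return
	}
	f, err := os.Open("image.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Untar sandboxes extraction via chroot (on Linux) into ./unpacked;
	// nil options are accepted and default to archive.TarOptions{}.
	if err := chrootarchive.Untar(f, "unpacked", nil); err != nil {
		log.Fatal(err)
	}
}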

View File

@ -1,86 +0,0 @@
// +build !windows
package chrootarchive
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and reexec.
func untar() {
runtime.LockOSThread()
flag.Parse()
var options *archive.TarOptions
//read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := archive.Unpack(os.Stdin, "/", options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
// when the full image list is passed (e.g. when this is used by
// `docker load`). We will marshal the options via a pipe to the
// child
r, w, err := os.Pipe()
if err != nil {
return fmt.Errorf("Untar pipe failure: %v", err)
}
cmd := reexec.Command("docker-untar", dest)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
// when `xz -d -c -q | docker-untar ...` fails on the docker-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will block
// on the write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
}
return nil
}
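
The options-over-a-pipe trick that invokeUnpack relies on can be shown in
isolation; this standalone sketch (not part of the original vendored file)
uses plain exec instead of the reexec machinery:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type opts struct{ ExcludePatterns []string }

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		// Child: fd 3 is the read end handed over via ExtraFiles.
		var o opts
		if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&o); err != nil {
			panic(err)
		}
		fmt.Println("child got", o.ExcludePatterns)
		return
	}
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command(os.Args[0], "child")
	cmd.Stdout = os.Stdout
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Encode after Start so a large option set never hits argv/env limits.
	if err := json.NewEncoder(w).Encode(opts{ExcludePatterns: []string{"*.tmp"}}); err != nil {
		panic(err)
	}
	w.Close()
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}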

View File

@ -1,22 +0,0 @@
package chrootarchive
import (
"io"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// chroot is not supported by Windows
func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
options *archive.TarOptions) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}

View File

@ -1,108 +0,0 @@
package chrootarchive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
// if the engine is running in a user namespace we need to use actual chroot
if rsystem.RunningInUserNS() {
return realChroot(path)
}
if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}
// make everything in new ns private
if err := mount.MakeRPrivate("/"); err != nil {
return err
}
if mounted, _ := mount.Mounted(path); !mounted {
if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
return realChroot(path)
}
}
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
if err != nil {
return fmt.Errorf("Error setting up pivot dir: %v", err)
}
var mounted bool
defer func() {
if mounted {
// make sure pivotDir is not mounted before we try to remove it
if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
if err == nil {
err = errCleanup
}
return
}
}
errCleanup := os.Remove(pivotDir)
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
if err == nil {
err = errCleanup
}
}
}()
if err := syscall.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
}
return realChroot(path)
}
mounted = true
// This is the new path for where the old root (prior to the pivot) has been moved to
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root: %v", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("Error making old root private after pivot: %v", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
}
mounted = false
return nil
}
func realChroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return fmt.Errorf("Error after fallback to chroot: %v", err)
}
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root after chroot: %v", err)
}
return nil
}

View File

@ -1,12 +0,0 @@
// +build !windows,!linux
package chrootarchive
import "syscall"
func chroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return err
}
return syscall.Chdir("/")
}

View File

@ -1,23 +0,0 @@
package chrootarchive
import (
"io"
"github.com/docker/docker/pkg/archive"
)
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can only be
// uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}

View File

@ -1,130 +0,0 @@
// +build !windows
package chrootarchive
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
type applyLayerResponse struct {
LayerSize int64 `json:"layerSize"`
}
// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and reexec.
func applyLayer() {
var (
tmpDir = ""
err error
options *archive.TarOptions
)
runtime.LockOSThread()
flag.Parse()
inUserns := rsystem.RunningInUserNS()
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
// We need to be able to set any perms
oldmask, err := system.Umask(0)
defer system.Umask(oldmask)
if err != nil {
fatal(err)
}
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)
}
if inUserns {
options.InUserNS = true
}
if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
size, err := archive.UnpackLayer("/", os.Stdin, options)
os.RemoveAll(tmpDir)
if err != nil {
fatal(err)
}
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
}
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
if options == nil {
options = &archive.TarOptions{}
if rsystem.RunningInUserNS() {
options.InUserNS = true
}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {
return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
}
cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer
cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
}
return response.LayerSize, nil
}

View File

@ -1,45 +0,0 @@
package chrootarchive
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
// Ensure it is a Windows-style volume path
dest = longpath.AddPrefix(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
}
s, err := archive.UnpackLayer(dest, layer, nil)
os.RemoveAll(tmpDir)
if err != nil {
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
}
return s, nil
}

View File

@ -1,28 +0,0 @@
// +build !windows
package chrootarchive
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Register("docker-applyLayer", applyLayer)
reexec.Register("docker-untar", untar)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
// flush consumes all the bytes from the reader, discarding them, and
// reports how many were read along with any error from the copy
func flush(r io.Reader) (bytes int64, err error) {
return io.Copy(ioutil.Discard, r)
}

View File

@ -1,4 +0,0 @@
package chrootarchive
func init() {
}

View File

@ -1,283 +0,0 @@
package fileutils
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/Sirupsen/logrus"
)
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
// CleanPatterns takes a slice of patterns and returns a new slice of
// patterns cleaned with filepath.Clean and stripped of any empty patterns.
// It also lets the caller know whether the slice contains any exception
// patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
// 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
for _, pattern := range patterns {
// Eliminate leading and trailing whitespace.
pattern = strings.TrimSpace(pattern)
if empty(pattern) {
continue
}
if exclusion(pattern) {
if len(pattern) == 1 {
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
}
exceptions = true
}
pattern = filepath.Clean(pattern)
cleanedPatterns = append(cleanedPatterns, pattern)
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}
return cleanedPatterns, patternDirs, exceptions, nil
}
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
patterns, patDirs, _, err := CleanPatterns(patterns)
if err != nil {
return false, err
}
return OptimizedMatches(file, patterns, patDirs)
}
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for i, pattern := range patterns {
negative := false
if exclusion(pattern) {
negative = true
pattern = pattern[1:]
}
match, err := regexpMatch(pattern, file)
if err != nil {
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}
if match {
matched = !negative
}
}
if matched {
logrus.Debugf("Skipping excluded path: %s", file)
}
return matched, nil
}
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in Go's filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(pattern, path); err != nil {
return false, err
}
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
if sl == `\` {
escSL += `\`
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
}
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if ch == '.' || ch == '$' {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape the next char. Note that a trailing \ in the pattern
// will be left alone (but we still need to escape it)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
} else {
regStr += `\`
}
} else {
regStr += string(ch)
}
}
regStr += "$"
res, err := regexp.MatchString(regStr, path)
// Map regexp's error to filepath's so no one knows we're not using filepath
if err != nil {
err = filepath.ErrBadPattern
}
return res, err
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
cleanDst := filepath.Clean(dst)
if cleanSrc == cleanDst {
return 0, nil
}
sf, err := os.Open(cleanSrc)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(cleanDst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link must not be a file; an error is
// returned if it resolves to one.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
return os.MkdirAll(path, 0755)
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0755)
if err != nil {
return err
}
f.Close()
}
}
return nil
}
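
A usage sketch for Matches with an exception pattern (not part of the
original vendored file):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// "docs/**" excludes everything under docs/, while the leading "!"
	// re-includes docs/README.md.
	patterns := []string{"docs/**", "!docs/README.md"}
	for _, f := range []string{"docs/api/v1.md", "docs/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-16s excluded=%v\n", f, excluded)
	}
}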

View File

@ -1,27 +0,0 @@
package fileutils
import (
"os"
"os/exec"
"strconv"
"strings"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
pid := os.Getpid()
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
output, err := cmd.CombinedOutput()
if err != nil {
return -1
}
outputStr := strings.TrimSpace(string(output))
fds := strings.Split(outputStr, "\n")
return len(fds) - 1
}

View File

@ -1,7 +0,0 @@
package fileutils
// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per-process and not system-wide.
func GetTotalUsedFds() int {
return -1
}

View File

@ -1,22 +0,0 @@
// +build linux freebsd
package fileutils
import (
"fmt"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// reading them via the /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}

View File

@ -1,7 +0,0 @@
package fileutils
// GetTotalUsedFds returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
return -1
}

View File

@ -1,39 +0,0 @@
package homedir
import (
"os"
"runtime"
"github.com/opencontainers/runc/libcontainer/user"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
if runtime.GOOS == "windows" {
return "USERPROFILE"
}
return "HOME"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
home := os.Getenv(Key())
if home == "" && runtime.GOOS != "windows" {
if u, err := user.CurrentUser(); err == nil {
return u.Home
}
}
return home
}
// GetShortcutString returns the string that is the shortcut to the user's
// home directory in the native shell of the platform being run on.
func GetShortcutString() string {
if runtime.GOOS == "windows" {
return "%USERPROFILE%" // be careful while using in format functions
}
return "~"
}
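
A trivial usage sketch (not part of the original vendored file):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Key() names the env var consulted (HOME, or USERPROFILE on Windows);
	// Get() resolves it, falling back to the passwd database on Unix.
	fmt.Println("env key:", homedir.Key())
	fmt.Println("config:", filepath.Join(homedir.Get(), ".docker", "config.json"))
}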

View File

@ -1,197 +0,0 @@
package idtools
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName string = "/etc/subuid"
subgidFileName string = "/etc/subgid"
)
// MkdirAllAs creates a directory (including any missing ones along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
// MkdirAllNewAs creates a directory (including any missing ones along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
var uid, gid int
if uidMap != nil {
xUID, err := ToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
uid = xUID
}
if gidMap != nil {
xGID, err := ToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
gid = xGID
}
return uid, gid, nil
}
// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed-in ID.
func ToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed-in ID.
func ToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
// CreateIDMappings takes a requested user and group name and, using the
// data from /etc/sub{uid,gid} ranges, creates the proper uid and gid
// remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
subuidRanges, err := parseSubuid(username)
if err != nil {
return nil, nil, err
}
subgidRanges, err := parseSubgid(groupname)
if err != nil {
return nil, nil, err
}
if len(subuidRanges) == 0 {
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
}
if len(subgidRanges) == 0 {
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
}
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
// sort the ranges by lowest ID first
sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
func parseSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
if err := s.Err(); err != nil {
return rangeList, err
}
text := strings.TrimSpace(s.Text())
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, nil
}
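
A worked example of the remapping arithmetic in ToHost/ToContainer (not part
of the original vendored file):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// One range: container IDs 0..65535 map to host IDs 100000..165535.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	host, err := idtools.ToHost(0, m) // container root
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host) // 100000 = 100000 + (0 - 0)

	cont, err := idtools.ToContainer(100042, m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cont) // 42 = 0 + (100042 - 100000)

	// IDs that fall outside every range are an error.
	if _, err := idtools.ToContainer(1000, m); err != nil {
		fmt.Println("unmapped:", err)
	}
}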

View File

@ -1,207 +0,0 @@
// +build !windows
package idtools
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
)
var (
entOnce sync.Once
getentCmd string
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil && chownExisting {
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
return err
}
// short-circuit: we were called with an existing directory and chown was requested
return nil
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, uid, gid int) bool {
statInfo, err := system.Stat(path)
if err != nil {
return false
}
fileMode := os.FileMode(statInfo.Mode())
permBits := fileMode.Perm()
return accessible(statInfo.UID() == uint32(uid),
statInfo.GID() == uint32(gid), permBits)
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
if isOwner && (perms&0100 == 0100) {
return true
}
if isGroup && (perms&0010 == 0010) {
return true
}
if perms&0001 == 0001 {
return true
}
return false
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUser(username)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
if err != nil {
return user.User{}, err
}
return usr, nil
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUid(uid)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}
func getentUser(args string) (user.User, error) {
reader, err := callGetent(args)
if err != nil {
return user.User{}, err
}
users, err := user.ParsePasswd(reader)
if err != nil {
return user.User{}, err
}
if len(users) == 0 {
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
}
return users[0], nil
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGroup(groupname)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGid(gid)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}
func getentGroup(args string) (user.Group, error) {
reader, err := callGetent(args)
if err != nil {
return user.Group{}, err
}
groups, err := user.ParseGroup(reader)
if err != nil {
return user.Group{}, err
}
if len(groups) == 0 {
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
}
return groups[0], nil
}
func callGetent(args string) (io.Reader, error) {
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
// if no `getent` command exists on the host, we can't do anything else
if getentCmd == "" {
return nil, fmt.Errorf("unable to find getent command on the host")
}
out, err := execCmd(getentCmd, args)
if err != nil {
exitCode, errC := system.GetExitCode(err)
if errC != nil {
return nil, err
}
switch exitCode {
case 1:
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
case 2:
terms := strings.Split(args, " ")
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
case 3:
return nil, fmt.Errorf("getent database doesn't support enumeration")
default:
return nil, err
}
}
return bytes.NewReader(out), nil
}
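
A usage sketch for the lookup helpers (not part of the original vendored
file; "root" resolves via local files or, failing that, getent):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	usr, err := idtools.LookupUser("root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", usr.Uid, usr.Gid, usr.Home)
}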

View File

@ -1,25 +0,0 @@
// +build windows
package idtools
import (
"os"
"github.com/docker/docker/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept, so this is
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, uid, gid int) bool {
return true
}

View File

@ -1,164 +0,0 @@
package idtools
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"sync"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
cmdTemplates = map[string]string{
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
"useradd": "-r -s /bin/false %s",
"usermod": "-%s %d-%d %s",
}
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
userMod = "usermod"
)
// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
}
return uid, gid, nil
}
func addUser(userName string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := parseSubuid(name)
if err != nil {
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
return fmt.Errorf("Can't find available subuid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
}
}
ranges, err = parseSubgid(name)
if err != nil {
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
return fmt.Errorf("Can't find available subgid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
ranges, err := parseSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
ranges, err := parseSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextRangeStart(rangeList ranges) (int, error) {
startID := defaultRangeStart
for _, arange := range rangeList {
if wouldOverlap(arange, startID) {
startID = arange.Start + arange.Length
}
}
return startID, nil
}
func wouldOverlap(arange subIDRange, ID int) bool {
low := ID
high := ID + defaultRangeLen
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
(high <= arange.Start+arange.Length && high >= arange.Start) {
return true
}
return false
}
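
A hedged usage sketch (not part of the original vendored file): this needs
root plus the adduser/useradd and usermod tools on the host, and "dockremap"
is just an example user name:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	uid, gid, err := idtools.AddNamespaceRangesUser("dockremap")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created %d:%d with 65536-wide subuid/subgid ranges\n", uid, gid)
}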

View File

@ -1,12 +0,0 @@
// +build !linux
package idtools
import "fmt"
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}

View File

@ -1,32 +0,0 @@
// +build !windows
package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
)
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
return execCmd.CombinedOutput()
}

View File

@ -1,51 +0,0 @@
package ioutils
import (
"errors"
"io"
)
var errBufferFull = errors.New("buffer is full")
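// fixedBuffer is a fixed-capacity byte buffer: Write appends at pos until
// the backing slice's capacity is reached, and Read consumes from lastRead
// forward. BytesPipe chains several of these.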
type fixedBuffer struct {
buf []byte
pos int
lastRead int
}
func (b *fixedBuffer) Write(p []byte) (int, error) {
n := copy(b.buf[b.pos:cap(b.buf)], p)
b.pos += n
if n < len(p) {
if b.pos == cap(b.buf) {
return n, errBufferFull
}
return n, io.ErrShortWrite
}
return n, nil
}
func (b *fixedBuffer) Read(p []byte) (int, error) {
n := copy(p, b.buf[b.lastRead:b.pos])
b.lastRead += n
return n, nil
}
func (b *fixedBuffer) Len() int {
return b.pos - b.lastRead
}
func (b *fixedBuffer) Cap() int {
return cap(b.buf)
}
func (b *fixedBuffer) Reset() {
b.pos = 0
b.lastRead = 0
b.buf = b.buf[:0]
}
func (b *fixedBuffer) String() string {
return string(b.buf[b.lastRead:b.pos])
}

View File

@ -1,186 +0,0 @@
package ioutils
import (
"errors"
"io"
"sync"
)
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
bufPoolsLock sync.Mutex
)
// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (FIFO
// queue). All written data may be read at most once. BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't stay overgrown after peak loads.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf []*fixedBuffer
bufLen int
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates a new BytesPipe, backed by a single fixed buffer
// with an initial capacity of 64 bytes (minCap).
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))
bp.wait = sync.NewCond(&bp.mu)
return bp
}
// Write writes p to BytesPipe.
// It can allocate new []byte slices in the process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
written := 0
loop0:
for {
if bp.closeErr != nil {
bp.mu.Unlock()
return written, ErrClosed
}
if len(bp.buf) == 0 {
bp.buf = append(bp.buf, getBuffer(64))
}
// get the last buffer
b := bp.buf[len(bp.buf)-1]
n, err := b.Write(p)
written += n
bp.bufLen += n
// errBufferFull is an error we expect to get if the buffer is full
if err != nil && err != errBufferFull {
bp.wait.Broadcast()
bp.mu.Unlock()
return written, err
}
// if there was enough room to write all then break
if len(p) == n {
break
}
// more data: write to the next slice
p = p[n:]
// make sure the buffer doesn't grow too big from this write
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
if bp.closeErr != nil {
continue loop0
}
}
// add new byte slice to the buffers slice and continue writing
nextCap := b.Cap() * 2
if nextCap > maxCap {
nextCap = maxCap
}
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()
bp.mu.Unlock()
return written, nil
}
// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
bp.mu.Lock()
if err != nil {
bp.closeErr = err
} else {
bp.closeErr = io.EOF
}
bp.wait.Broadcast()
bp.mu.Unlock()
return nil
}
// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
// Read reads bytes from BytesPipe.
// Data can be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
if bp.bufLen == 0 {
if bp.closeErr != nil {
bp.mu.Unlock()
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.bufLen == 0 && bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
}
}
for bp.bufLen > 0 {
b := bp.buf[0]
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
n += read
bp.bufLen -= read
if b.Len() == 0 {
// it's empty so return it to the pool and move to the next one
returnBuffer(b)
bp.buf[0] = nil
bp.buf = bp.buf[1:]
}
if len(p) == read {
break
}
p = p[read:]
}
bp.wait.Broadcast()
bp.mu.Unlock()
return
}
func returnBuffer(b *fixedBuffer) {
b.Reset()
bufPoolsLock.Lock()
pool := bufPools[b.Cap()]
bufPoolsLock.Unlock()
if pool != nil {
pool.Put(b)
}
}
func getBuffer(size int) *fixedBuffer {
bufPoolsLock.Lock()
pool, ok := bufPools[size]
if !ok {
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
bufPools[size] = pool
}
bufPoolsLock.Unlock()
return pool.Get().(*fixedBuffer)
}
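A minimal usage sketch, assuming the package is imported from its vendored path github.com/docker/docker/pkg/ioutils: one goroutine writes and closes, the reader drains until io.EOF.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()
	go func() {
		bp.Write([]byte("hello"))
		bp.Close() // readers drain buffered data, then see io.EOF
	}()
	buf := make([]byte, 16)
	for {
		n, err := bp.Read(buf)
		if n > 0 {
			fmt.Printf("read %q\n", buf[:n])
		}
		if err != nil { // io.EOF once the pipe is drained and closed
			break
		}
	}
}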

View File

@ -1,22 +0,0 @@
package ioutils
import (
"fmt"
"io"
)
// FprintfIfNotEmpty formats and writes the string value to w if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
if value != "" {
return fmt.Fprintf(w, format, value)
}
return 0, nil
}
// FprintfIfTrue formats and writes the boolean value to w if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
if ok {
return fmt.Fprintf(w, format, ok)
}
return 0, nil
}
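Two illustrative fragments of the intended pretty-printer pattern (w, driverName, and readOnly are hypothetical variables in the caller's scope):

// Inside a pretty-printer, emit fields only when they carry information:
ioutils.FprintfIfNotEmpty(w, "Driver: %s\n", driverName)
ioutils.FprintfIfTrue(w, "ReadOnly: %v\n", readOnly)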

View File

@ -1,162 +0,0 @@
package ioutils
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
// NewAtomicFileWriter returns a WriteCloser: writes go to a temporary file,
// and closing it atomically renames the temporary file to the destination
// path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
abspath, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
return &atomicFileWriter{
f: f,
fn: abspath,
perm: perm,
}, nil
}
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := NewAtomicFileWriter(filename, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
f.(*atomicFileWriter).writeErr = err
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
type atomicFileWriter struct {
f *os.File
fn string
writeErr error
perm os.FileMode
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
n, err := w.f.Write(dt)
if err != nil {
w.writeErr = err
}
return n, err
}
func (w *atomicFileWriter) Close() (retErr error) {
defer func() {
if retErr != nil || w.writeErr != nil {
os.Remove(w.f.Name())
}
}()
if err := w.f.Sync(); err != nil {
w.f.Close()
return err
}
if err := w.f.Close(); err != nil {
return err
}
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
return err
}
if w.writeErr == nil {
return os.Rename(w.f.Name(), w.fn)
}
return nil
}
// AtomicWriteSet is used to atomically write a set
// of files and ensure they are visible at the same time.
// Must be committed to a new directory.
type AtomicWriteSet struct {
root string
}
// NewAtomicWriteSet creates a new atomic write set to
// atomically create a set of files. The given directory
// is used as the base directory for storing files before
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
td, err := ioutil.TempDir(tmpDir, "write-set-")
if err != nil {
return nil, err
}
return &AtomicWriteSet{
root: td,
}, nil
}
// WriteFile writes a file to the set, guaranteeing the file
// has been synced.
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
type syncFileCloser struct {
*os.File
}
func (w syncFileCloser) Close() error {
err := w.File.Sync()
if err1 := w.File.Close(); err == nil {
err = err1
}
return err
}
// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
if err != nil {
return nil, err
}
return syncFileCloser{f}, nil
}
// Cancel cancels the set and removes all temporary data
// created in the set.
func (ws *AtomicWriteSet) Cancel() error {
return os.RemoveAll(ws.root)
}
// Commit moves all created files to the target directory. The
// target directory must not exist and the parent of the target
// directory must exist.
func (ws *AtomicWriteSet) Commit(target string) error {
return os.Rename(ws.root, target)
}
// String returns the location the set is writing to.
func (ws *AtomicWriteSet) String() string {
return ws.root
}
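A usage sketch with hypothetical paths (parent directories assumed to exist): AtomicWriteFile for a single file, AtomicWriteSet when a group of files must become visible in one rename. Assumes the vendored import path github.com/docker/docker/pkg/ioutils.

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Single file: data lands in a temp file that is renamed into place.
	if err := ioutils.AtomicWriteFile("/tmp/demo/config.json", []byte(`{}`), 0600); err != nil {
		log.Fatal(err)
	}
	// File set: files are staged in a temp dir, then exposed by one rename.
	ws, err := ioutils.NewAtomicWriteSet("")
	if err != nil {
		log.Fatal(err)
	}
	if err := ws.WriteFile("manifest.json", []byte(`{}`), 0644); err != nil {
		ws.Cancel()
		log.Fatal(err)
	}
	if err := ws.Commit("/tmp/demo/bundle"); err != nil { // target must not exist yet
		log.Fatal(err)
	}
}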

View File

@ -1,223 +0,0 @@
package ioutils
import (
"bytes"
"fmt"
"io"
"os"
)
type pos struct {
idx int
offset int64
}
type multiReadSeeker struct {
readers []io.ReadSeeker
pos *pos
posIdx map[io.ReadSeeker]int
}
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
var tmpOffset int64
switch whence {
case os.SEEK_SET:
for i, rdr := range r.readers {
// get size of the current reader
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
if offset > tmpOffset+s {
if i == len(r.readers)-1 {
rdrOffset := s + (offset - tmpOffset)
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
return -1, err
}
r.pos = &pos{i, rdrOffset}
return offset, nil
}
tmpOffset += s
continue
}
rdrOffset := offset - tmpOffset
idx := i
rdr.Seek(rdrOffset, os.SEEK_SET)
// make sure all following readers are at 0
for _, rdr := range r.readers[i+1:] {
rdr.Seek(0, os.SEEK_SET)
}
if rdrOffset == s && i != len(r.readers)-1 {
idx++
rdrOffset = 0
}
r.pos = &pos{idx, rdrOffset}
return offset, nil
}
case os.SEEK_END:
for _, rdr := range r.readers {
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
tmpOffset += s
}
r.Seek(tmpOffset+offset, os.SEEK_SET)
return tmpOffset + offset, nil
case os.SEEK_CUR:
if r.pos == nil {
return r.Seek(offset, os.SEEK_SET)
}
// Just return the current offset
if offset == 0 {
return r.getCurOffset()
}
curOffset, err := r.getCurOffset()
if err != nil {
return -1, err
}
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrOffset}
return curOffset + offset, nil
default:
return -1, fmt.Errorf("Invalid whence: %d", whence)
}
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
var offsetTo int64
for _, rdr := range r.readers {
size, err := getReadSeekerSize(rdr)
if err != nil {
return nil, -1, err
}
if offsetTo+size > offset {
return rdr, offset - offsetTo, nil
}
if rdr == r.readers[len(r.readers)-1] {
return rdr, offsetTo + offset, nil
}
offsetTo += size
}
return nil, 0, nil
}
func (r *multiReadSeeker) getCurOffset() (int64, error) {
var totalSize int64
for _, rdr := range r.readers[:r.pos.idx+1] {
if r.posIdx[rdr] == r.pos.idx {
totalSize += r.pos.offset
break
}
size, err := getReadSeekerSize(rdr)
if err != nil {
return -1, fmt.Errorf("error getting seeker size: %v", err)
}
totalSize += size
}
return totalSize, nil
}
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
var offset int64
for _, rs := range r.readers {
if rs == rdr {
break
}
// sum the sizes of the readers that precede rdr, not of rdr itself
size, err := getReadSeekerSize(rs)
if err != nil {
return -1, err
}
offset += size
}
return offset, nil
}
func (r *multiReadSeeker) Read(b []byte) (int, error) {
if r.pos == nil {
r.pos = &pos{0, 0}
}
bLen := int64(len(b))
buf := bytes.NewBuffer(nil)
var rdr io.ReadSeeker
for _, rdr = range r.readers[r.pos.idx:] {
readBytes, err := io.CopyN(buf, rdr, bLen)
if err != nil && err != io.EOF {
return -1, err
}
bLen -= readBytes
if bLen == 0 {
break
}
}
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrPos}
return buf.Read(b)
}
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
// save the current position
pos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
// get the size
size, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
// reset the position
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
return -1, err
}
return size, nil
}
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
if len(readers) == 1 {
return readers[0]
}
idx := make(map[io.ReadSeeker]int)
for i, rdr := range readers {
idx[rdr] = i
}
return &multiReadSeeker{
readers: readers,
posIdx: idx,
}
}
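An illustrative sketch: strings.Reader implements io.ReadSeeker, so three of them can stand in for file sections behind a single seekable stream.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("hello "),
		strings.NewReader("multi "),
		strings.NewReader("reader"),
	)
	rs.Seek(6, os.SEEK_SET) // lands at the start of the second reader
	buf := make([]byte, 5)
	n, _ := rs.Read(buf)
	fmt.Println(string(buf[:n])) // "multi"
}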

View File

@ -1,154 +0,0 @@
package ioutils
import (
"crypto/sha256"
"encoding/hex"
"io"
"golang.org/x/net/context"
)
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
return r.closer()
}
// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
return &readCloserWrapper{
Reader: r,
closer: closer,
}
}
type readerErrWrapper struct {
reader io.Reader
closer func()
}
func (r *readerErrWrapper) Read(p []byte) (int, error) {
n, err := r.reader.Read(p)
if err != nil {
r.closer()
}
return n, err
}
// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
return &readerErrWrapper{
reader: r,
closer: closer,
}
}
// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// OnEOFReader wraps an io.ReadCloser and a function; the function runs
// once, when the reader reaches end of file or is closed.
type OnEOFReader struct {
Rc io.ReadCloser
Fn func()
}
func (r *OnEOFReader) Read(p []byte) (n int, err error) {
n, err = r.Rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
// Close closes the reader and runs the function.
func (r *OnEOFReader) Close() error {
err := r.Rc.Close()
r.runFunc()
return err
}
func (r *OnEOFReader) runFunc() {
if fn := r.Fn; fn != nil {
fn()
r.Fn = nil
}
}
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
cancel func()
pR *io.PipeReader // Stream to read from
pW *io.PipeWriter
}
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
pR, pW := io.Pipe()
// Create a context used to signal when the pipe is closed
doneCtx, cancel := context.WithCancel(context.Background())
p := &cancelReadCloser{
cancel: cancel,
pR: pR,
pW: pW,
}
go func() {
_, err := io.Copy(pW, in)
select {
case <-ctx.Done():
// If the context was closed, p.closeWithError
// was already called. Calling it again would
// change the error that Read returns.
default:
p.closeWithError(err)
}
in.Close()
}()
go func() {
for {
select {
case <-ctx.Done():
p.closeWithError(ctx.Err())
case <-doneCtx.Done():
return
}
}
}()
return p
}
// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
return p.pR.Read(buf)
}
// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
p.pW.CloseWithError(err)
p.cancel()
}
// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
p.closeWithError(io.EOF)
return nil
}
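A sketch of the cancellation wrapper (illustrative; it assumes the same golang.org/x/net/context package used above, and a Unix-only /dev/stdin as a reader that may stall): past the deadline, Read returns the context's error instead of blocking forever.

package main

import (
	"io/ioutil"
	"log"
	"os"
	"time"

	"github.com/docker/docker/pkg/ioutils"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	f, err := os.Open("/dev/stdin") // a reader that may stall
	if err != nil {
		log.Fatal(err)
	}
	rc := ioutils.NewCancelReadCloser(ctx, f)
	defer rc.Close()

	data, err := ioutil.ReadAll(rc) // context.DeadlineExceeded on timeout
	log.Printf("read %d bytes, err=%v", len(data), err)
}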

View File

@ -1,10 +0,0 @@
// +build !windows

package ioutils
import "io/ioutil"
// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
return ioutil.TempDir(dir, prefix)
}

View File

@ -1,18 +0,0 @@
// +build windows

package ioutils
import (
"io/ioutil"
"github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
tempDir, err := ioutil.TempDir(dir, prefix)
if err != nil {
return "", err
}
return longpath.AddPrefix(tempDir), nil
}

View File

@ -1,92 +0,0 @@
package ioutils
import (
"io"
"sync"
)
// WriteFlusher wraps the Write and Flush operations, ensuring that every
// write is followed by a flush. In addition, the Close method can be called
// to reject Write and Flush calls once the target's lifecycle has ended.
type WriteFlusher struct {
w io.Writer
flusher flusher
flushed chan struct{}
flushedOnce sync.Once
closed chan struct{}
closeLock sync.Mutex
}
type flusher interface {
Flush()
}
var errWriteFlusherClosed = io.EOF
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
select {
case <-wf.closed:
return 0, errWriteFlusherClosed
default:
}
n, err = wf.w.Write(b)
wf.Flush() // every write is a flush.
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
select {
case <-wf.closed:
return
default:
}
wf.flushedOnce.Do(func() {
close(wf.flushed)
})
wf.flusher.Flush()
}
// Flushed reports whether at least one flush has occurred since the
// WriteFlusher was created.
func (wf *WriteFlusher) Flushed() bool {
// BUG(stevvooe): Remove this method. Its use is inherently racy. It seems to
// be used to detect whether or not a response code has been issued. Another
// hook should be used instead.
var flushed bool
select {
case <-wf.flushed:
flushed = true
default:
}
return flushed
}
// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
wf.closeLock.Lock()
defer wf.closeLock.Unlock()
select {
case <-wf.closed:
return errWriteFlusherClosed
default:
close(wf.closed)
}
return nil
}
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var fl flusher
if f, ok := w.(flusher); ok {
fl = f
} else {
fl = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
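A sketch of the intended HTTP use: http.ResponseWriter commonly also implements http.Flusher, which satisfies the unexported flusher interface here, so each progress line reaches the client as it is written. The handler and route below are hypothetical.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func progress(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close() // reject further writes once the handler is done

	for i := 1; i <= 3; i++ {
		fmt.Fprintf(wf, "step %d done\n", i) // written and flushed immediately
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/progress", progress)
	http.ListenAndServe(":8080", nil)
}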

View File

@ -1,66 +0,0 @@
package ioutils
import "io"
// NopWriter is a Writer whose Write operation is a no-op.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser wraps w in a WriteCloser whose Close is a no-op.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher is a flusher whose Flush operation is a no-op.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
io.Writer
closer func() error
}
func (r *writeCloserWrapper) Close() error {
return r.closer()
}
// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
closer: closer,
}
}
// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return value is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
Count int64
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,
}
}
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
count, err = wc.Writer.Write(p)
wc.Count += int64(count)
return
}
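A sketch of the json.Encoder case the comment mentions: Encode hides the byte count, but wrapping the destination in a WriteCounter recovers it (ioutil.Discard stands in for a real sink).

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	wc := ioutils.NewWriteCounter(ioutil.Discard)
	if err := json.NewEncoder(wc).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println("encoded bytes:", wc.Count) // bytes the encoder actually wrote
}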

View File

@ -1,149 +0,0 @@
package mount
import (
"fmt"
"strings"
)
var flags = map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
var validFlags = map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
var propagationFlags = map[string]bool{
"bind": true,
"rbind": true,
"unbindable": true,
"runbindable": true,
"private": true,
"rprivate": true,
"shared": true,
"rshared": true,
"slave": true,
"rslave": true,
}
// MergeTmpfsOptions merges tmpfs mount options so that there are no
// duplicates; for conflicting options, the last occurrence wins.
func MergeTmpfsOptions(options []string) ([]string, error) {
// We use collision maps to remove duplicates.
// For a flag, the key is the flag value (the key for a propagation flag is -1)
// For data=value options, the key is the data key
flagCollisions := map[int]bool{}
dataCollisions := map[string]bool{}
var newOptions []string
// We process in reverse order
for i := len(options) - 1; i >= 0; i-- {
option := options[i]
if option == "defaults" {
continue
}
if f, ok := flags[option]; ok && f.flag != 0 {
// There is only one propagation mode
key := f.flag
if propagationFlags[option] {
key = -1
}
// Check to see if there is collision for flag
if !flagCollisions[key] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
flagCollisions[key] = true
}
continue
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
dataCollisions[opt[0]] = true
}
}
return newOptions, nil
}
// parseOptions parses fstab-style mount options into mount(2) flags
// and filesystem-specific data
func parseOptions(options string) (int, string) {
var (
flag int
data []string
)
for _, o := range strings.Split(options, ",") {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &= ^f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
}
// ParseTmpfsOptions parses fstab-style tmpfs mount options into flags and
// data, validating that each data key is a known tmpfs option
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := parseOptions(options)
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
}
}
return flags, data, nil
}
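An in-package sketch (hypothetical, test-style) of the two parsers above; the expected values follow directly from the tables at the top of the file.

package mount

import "fmt"

func demoParsers() {
	flag, data := parseOptions("ro,noatime,size=64m")
	// flag == RDONLY|NOATIME; "size=64m" is not a flag word, so it is data.
	fmt.Printf("flag=%#x data=%q\n", flag, data)

	merged, _ := MergeTmpfsOptions([]string{"ro", "rw", "size=64m", "size=128m"})
	fmt.Println(merged) // later options win: [rw size=128m]
}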

View File

@ -1,48 +0,0 @@
// +build freebsd,cgo

package mount
/*
#include <sys/mount.h>
*/
import "C"
const (
// RDONLY will mount the filesystem as read-only.
RDONLY = C.MNT_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = C.MNT_NOSUID
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = C.MNT_NOEXEC
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
SYNCHRONOUS = C.MNT_SYNCHRONOUS
// NOATIME will not update the file access time when reading from a file.
NOATIME = C.MNT_NOATIME
)
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NODEV = 0
NODIRATIME = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIVE = 0
RELATIME = 0
REMOUNT = 0
STRICTATIME = 0
)

View File

@ -1,85 +0,0 @@
package mount
import (
"syscall"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = syscall.MS_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = syscall.MS_NOSUID
// NODEV will not interpret character or block special devices on the file
// system.
NODEV = syscall.MS_NODEV
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = syscall.MS_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done
// synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = syscall.MS_DIRSYNC
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a read-only file system writable. It does not change the device or
// mount point.
REMOUNT = syscall.MS_REMOUNT
// MANDLOCK will force mandatory locks on a filesystem.
MANDLOCK = syscall.MS_MANDLOCK
// NOATIME will not update the file access time when reading from a file.
NOATIME = syscall.MS_NOATIME
// NODIRATIME will not update the directory access time.
NODIRATIME = syscall.MS_NODIRATIME
// BIND remounts a subtree somewhere else.
BIND = syscall.MS_BIND
// RBIND remounts a subtree and all possible submounts somewhere else.
RBIND = syscall.MS_BIND | syscall.MS_REC
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
UNBINDABLE = syscall.MS_UNBINDABLE
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
// PRIVATE creates a mount which carries no propagation abilities.
PRIVATE = syscall.MS_PRIVATE
// RPRIVATE marks the entire mount tree as PRIVATE.
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
// SLAVE creates a mount which receives propagation from its master, but not
// vice versa.
SLAVE = syscall.MS_SLAVE
// RSLAVE marks the entire mount tree as SLAVE.
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
// SHARED creates a mount which provides the ability to create mirrors of
// that mount such that mounts and unmounts within any of the mirrors
// propagate to the other mirrors.
SHARED = syscall.MS_SHARED
// RSHARED marks the entire mount tree as SHARED.
RSHARED = syscall.MS_SHARED | syscall.MS_REC
// RELATIME updates inode access times relative to modify or change time.
RELATIME = syscall.MS_RELATIME
// STRICTATIME allows explicitly requesting full atime updates. This makes
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = syscall.MS_STRICTATIME
)

View File

@ -1,30 +0,0 @@
// +build !linux,!freebsd freebsd,!cgo solaris,!cgo

package mount
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NOATIME = 0
NODEV = 0
NODIRATIME = 0
NOEXEC = 0
NOSUID = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIME = 0
RELATIVE = 0
REMOUNT = 0
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
)

View File

@ -1,74 +0,0 @@
package mount
import (
"time"
)
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {
return false, err
}
// Search the table for the mountpoint
for _, e := range entries {
if e.Mountpoint == mountpoint {
return true, nil
}
}
return false, nil
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
flag, _ := parseOptions(options)
if flag&REMOUNT != REMOUNT {
if mounted, err := Mounted(target); err != nil || mounted {
return err
}
}
return ForceMount(device, target, mType, options)
}
// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
flag, data := parseOptions(options)
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
return err
}
return nil
}
// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
if mounted, err := Mounted(target); err != nil || !mounted {
return err
}
return ForceUnmount(target)
}
// ForceUnmount will force an unmount of the target filesystem, regardless
// of whether it is mounted or not.
func ForceUnmount(target string) (err error) {
// Simple retry logic for unmount
for i := 0; i < 10; i++ {
if err = unmount(target, 0); err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return
}
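A hedged end-to-end sketch (Linux only, requires root; both paths are hypothetical and must already exist): a read-only bind mount expressed in the option-string format described above, then cleanup.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// "bind,ro" becomes MS_BIND|MS_RDONLY; the Linux mounter then issues a
	// second remount pass so the read-only flag actually takes effect.
	if err := mount.Mount("/srv/data", "/mnt/data", "none", "bind,ro"); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount("/mnt/data")

	mounted, err := mount.Mounted("/mnt/data")
	fmt.Println("mounted:", mounted, err)
}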

View File

@ -1,59 +0,0 @@
package mount
/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"
import (
"fmt"
"strings"
"syscall"
"unsafe"
)
func allocateIOVecs(options []string) []C.struct_iovec {
out := make([]C.struct_iovec, len(options))
for i, option := range options {
out[i].iov_base = unsafe.Pointer(C.CString(option))
out[i].iov_len = C.size_t(len(option) + 1)
}
return out
}
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
xs := strings.Split(data, ",")
for _, x := range xs {
if x == "bind" {
isNullFS = true
}
}
options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
options = append(options, "fstype", mType, "from", device)
}
rawOptions := allocateIOVecs(options)
for _, rawOption := range rawOptions {
defer C.free(rawOption.iov_base)
}
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
return fmt.Errorf("Failed to call nmount: %s", reason)
}
return nil
}
func unmount(target string, flag int) error {
return syscall.Unmount(target, flag)
}

View File

@ -1,21 +0,0 @@
package mount
import (
"syscall"
)
func mount(device, target, mType string, flag uintptr, data string) error {
if err := syscall.Mount(device, target, mType, flag, data); err != nil {
return err
}
// A read-only bind mount needs a second remount pass for MS_RDONLY to take
// effect, since the initial bind ignores the read-only flag.
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
}
return nil
}
func unmount(target string, flag int) error {
return syscall.Unmount(target, flag)
}

View File

@ -1,33 +0,0 @@
// +build solaris,cgo

package mount
import (
"golang.org/x/sys/unix"
"unsafe"
)
// #include <stdlib.h>
// #include <stdio.h>
// #include <sys/mount.h>
// int Mount(const char *spec, const char *dir, int mflag,
// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
// }
import "C"
func mount(device, target, mType string, flag uintptr, data string) error {
spec := C.CString(device)
dir := C.CString(target)
fstype := C.CString(mType)
_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
C.free(unsafe.Pointer(spec))
C.free(unsafe.Pointer(dir))
C.free(unsafe.Pointer(fstype))
return err
}
func unmount(target string, flag int) error {
err := unix.Unmount(target, flag)
return err
}

View File

@ -1,11 +0,0 @@
// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo

package mount
func mount(device, target, mType string, flag uintptr, data string) error {
panic("Not implemented")
}
func unmount(target string, flag int) error {
panic("Not implemented")
}

Some files were not shown because too many files have changed in this diff.