updates

Signed-off-by: Jess Frazelle <jess@mesosphere.com>

parent fec7085140
commit 3cf304f3e1

380 changed files with 53507 additions and 67 deletions
.gitignore (vendored) | 6

@@ -45,3 +45,9 @@ Icon
 .Trashes

 binctr
+*.tar
+rootfs
+config.json
+bin
+rootfs.go
+.ip
Makefile | 42

@@ -1,4 +1,4 @@
-.PHONY: clean all fmt vet lint build test install static
+.PHONY: clean clean-rootfs all fmt vet lint build test install image.tar rootfs.go static
 PREFIX?=$(shell pwd)
 BUILDTAGS=seccomp apparmor
 
@@ -16,20 +16,40 @@ ifneq ($(GITUNTRACKEDCHANGES),)
 	GITCOMMIT := $(GITCOMMIT)-dirty
 endif
 
+IMAGE := alpine
+DOCKER_ROOTFS_IMAGE := $(IMAGE)
+
 LDFLAGS := ${LDFLAGS} \
-	-X $(PROJECT)/main.GITCOMMIT=${GITCOMMIT} \
-	-X $(PROJECT)/main.VERSION=${VERSION} \
+	-X main.GITCOMMIT=${GITCOMMIT} \
+	-X main.VERSION=${VERSION} \
+	-X main.IMAGE=${IMAGE} \
+	-X main.IMAGESHA=$(shell docker inspect --format "{{.Id}}" $(IMAGE))
 
-all: clean build fmt lint test vet install
+BINDIR := $(CURDIR)/bin
 
-build:
+all: clean static fmt lint test vet install
+
+build: rootfs.go
 	@echo "+ $@"
 	go build -tags "$(BUILDTAGS)" -ldflags "${LDFLAGS}" .
 
-static:
+$(BINDIR):
+	@mkdir -p $@
+
+static: $(BINDIR) rootfs.go
 	@echo "+ $@"
 	CGO_ENABLED=1 go build -tags "$(BUILDTAGS) cgo static_build" \
-		-ldflags "-w -extldflags -static ${LDFLAGS}" -o binctr .
+		-ldflags "-w -extldflags -static ${LDFLAGS}" -o bin/$(notdir $(IMAGE)) .
+	@sudo setcap cap_chown,cap_fowner,cap_dac_override,cap_setuid,cap_setgid+ep ./bin/$(notdir $(IMAGE))
+	@echo "Static container created at: ./bin/$(notdir $(IMAGE))"
+	@echo "Run with ./bin/$(notdir $(IMAGE))"
+
+image.tar:
+	docker pull --disable-content-trust=false $(DOCKER_ROOTFS_IMAGE)
+	docker export $(shell docker create $(DOCKER_ROOTFS_IMAGE) sh) > $@
+
+rootfs.go: image.tar
+	go generate
 
 fmt:
 	@echo "+ $@"
@@ -47,9 +67,15 @@ vet:
 	@echo "+ $@"
 	@go vet $(shell go list ./... | grep -v $(VENDOR))
 
-clean:
+clean-rootfs:
+	@sudo $(RM) -r rootfs
+
+clean: clean-rootfs
 	@echo "+ $@"
 	@$(RM) binctr
+	@$(RM) *.tar
+	@$(RM) rootfs.go
+	@$(RM) -r $(BINDIR)
 
 install:
 	@echo "+ $@"
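Read together, the new targets form a pipeline: `image.tar` is exported from the Docker image, `rootfs.go` is generated from that tarball, and `static` links everything into one binary and grants it file capabilities. A rough sketch of what `make static IMAGE=busybox` ends up running (commands taken from the targets above; the echoed output is illustrative, since several recipe lines are silenced with `@`):

```console
$ make static IMAGE=busybox
docker pull --disable-content-trust=false busybox
docker export $(docker create busybox sh) > image.tar
go generate
CGO_ENABLED=1 go build -tags "seccomp apparmor cgo static_build" \
	-ldflags "-w -extldflags -static ..." -o bin/busybox .
sudo setcap cap_chown,cap_fowner,cap_dac_override,cap_setuid,cap_setgid+ep ./bin/busybox
Static container created at: ./bin/busybox
Run with ./bin/busybox
```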
README.md | 103 (new file)

# binctr

Create fully static, including rootfs embedded, binaries that pop you directly
into a container. Can be run by an unprivileged user.

### Building

```console
$ make static
Static container created at: ./bin/alpine
Run with ./bin/alpine

# building a different base image
$ make static IMAGE=busybox
Static container created at: ./bin/busybox
Run with ./bin/busybox
```

### Running

```console
$ ./alpine
$ ./busybox --read-only
```

### Running with custom commands & args

```console
# let's make an nginx binary
$ make static IMAGE=nginx
Static container created at: ./bin/nginx
Run with ./bin/nginx

$ ./bin/nginx nginx -g daemon off

# But we have no networking! Don't worry we can fix this
# Let's install my super cool binary for setting up networking in a container
$ go get github.com/jfrazelle/netns

# now we can call this as a prestart hook
$ ./bin/nginx --hook prestart:netns nginx -g daemon off

# let's get the ip file
$ cat .ip
172.19.0.10

Success!
```

### Usage

```console
$ ./bin/alpine -h
 _     _            _
| |__ (_)_ __   ___| |_ _ __
| '_ \| | '_ \ / __| __| '__|
| |_) | | | | | (__| |_| |
|_.__/|_|_| |_|\___|\__|_|

 Fully static, self-contained container including the rootfs
 that can be run by an unprivileged user.

 Embedded Image: alpine - sha256:70c557e50ed630deed07cbb0dc4d28aa0f2a485cf7af124cc48f06bce83f784b
 Version: 0.1.0
 GitCommit: 13fcd27-dirty

  -D	run in debug mode
  -console string
    	the pty slave path for use with the container
  -d	detach from the container's process
  -hook value
    	Hooks to prefill into spec file. (ex. --hook prestart:netns) (default [])
  -id string
    	container ID (default "nginx")
  -pid-file string
    	specify the file to write the process id to
  -read-only
    	make container filesystem readonly
  -root string
    	root directory of container state, should be tmpfs (default "/run/binctr")
  -t	allocate a tty for the container (default true)
  -v	print version and exit (shorthand)
  -version
    	print version and exit
```

## Caveats

**Caps the binary needs TO UNPACK AND SET THE RIGHT PERMS ON THE ROOTFS FOR THE USERNS USER**

- **CAP_CHOWN**: chown the rootfs to the userns user
- **CAP_FOWNER**: chmod rootfs
- **CAP_DAC_OVERRIDE**: symlinks

**These can be dropped after the rootfs is unpacked and chowned.**

-------

**Caps for libcontainer**

- **CAP_SETUID**, **CAP_SETGID**: so we can write to `uid_map`, `gid_map`, in
  `nsexec.c`.
  See: http://man7.org/linux/man-pages/man7/user_namespaces.7.html
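The `setcap` line in the `static` target is what grants the caveat capabilities above as file capabilities on the produced binary; they can be inspected with `getcap`. A hypothetical session (output formatting varies between libcap versions):

```console
$ getcap ./bin/alpine
./bin/alpine = cap_chown,cap_dac_override,cap_fowner,cap_setgid,cap_setuid+ep
```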
generate.go | 36 (new file)

// +build ignore

package main

import (
	"encoding/base64"
	"io/ioutil"
	"os"
	"path/filepath"
)

// Reads image.tar and saves its contents as a base64 constant in rootfs.go.
func main() {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	out, err := os.Create(filepath.Join(wd, "rootfs.go"))
	if err != nil {
		panic(err)
	}
	tarPath := filepath.Join(wd, "image.tar")

	out.Write([]byte("// This file is autogenerated; DO NOT EDIT DIRECTLY\n// See generate.go for more info\npackage main\n\nconst (\n"))
	out.Write([]byte("\tDATA = `"))
	f, err := ioutil.ReadFile(tarPath)
	if err != nil {
		panic(err)
	}

	tar := base64.StdEncoding.EncodeToString(f)
	out.Write([]byte(tar))

	out.Write([]byte("`\n"))
	out.Write([]byte(")\n"))
}
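For reference, the rootfs.go that generate.go writes is nothing more than the exported image tarball as a single base64 string constant; a trimmed, hypothetical example of its shape (the real constant is the entire image.tar, so typically many megabytes long):

```go
// This file is autogenerated; DO NOT EDIT DIRECTLY
// See generate.go for more info
package main

const (
	DATA = `dGhpcyBpcyBqdXN0IGFuIGlsbHVzdHJhdGl2ZSBwbGFjZWhvbGRlcg==`
)
```

A raw (back-quoted) string literal works here because the standard base64 alphabet never contains a backquote, so no escaping is needed.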
main.go | 149

@@ -1,16 +1,18 @@
 package main
 
 import (
-	"bytes"
-	"encoding/base64"
 	"flag"
 	"fmt"
 	"os"
+	"os/exec"
 	"runtime"
+	"strings"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/archive"
+	aaprofile "github.com/docker/docker/profiles/apparmor"
 	"github.com/opencontainers/runc/libcontainer"
+	"github.com/opencontainers/runc/libcontainer/apparmor"
+	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
 const (
@@ -21,19 +23,36 @@ const (
 | |_) | | | | | (__| |_| |
 |_.__/|_|_| |_|\___|\__|_|
 
- Fully static self-contained container including the rootfs.
+ Fully static, self-contained container including the rootfs
+ that can be run by an unprivileged user.
+
+ Embedded Image: %s - %s
  Version: %s
  GitCommit: %s
 
 `
 
 	defaultRoot            = "/run/binctr"
+	defaultRootfsDir       = "rootfs"
+	defaultApparmorProfile = "docker-default"
 )
 
 var (
 	console     = os.Getenv("console")
 	containerID string
+	pidFile     string
 	root        string
 
+	allocateTty bool
+	readonly    bool
+	detach      bool
+
+	hooks     specs.Hooks
+	hookflags stringSlice
+
+	remappedUID uint32 = 886432
+	remappedGID uint32 = 886432
+
 	debug   bool
 	version bool
@@ -43,28 +62,80 @@ var (
 	// VERSION is the binary version.
 	VERSION = "v0.1.0"
 
-	// DATA is the rootfs tar that is added at compile time.
-	DATA = ""
+	// IMAGE is the name of the image that is embedded at compile time.
+	IMAGE = "alpine"
+	// IMAGESHA is the sha digest of the image that is embedded at compile time.
+	IMAGESHA = "sha256:70c557e50ed630deed07cbb0dc4d28aa0f2a485cf7af124cc48f06bce83f784b"
 )
 
+// stringSlice is a slice of strings
+type stringSlice []string
+
+// implement the flag interface for stringSlice
+func (s *stringSlice) String() string {
+	return fmt.Sprintf("%s", *s)
+}
+func (s *stringSlice) Set(value string) error {
+	*s = append(*s, value)
+	return nil
+}
+func (s stringSlice) ParseHooks() (hooks specs.Hooks, err error) {
+	for _, v := range s {
+		parts := strings.SplitN(v, ":", 2)
+		if len(parts) <= 1 {
+			return hooks, fmt.Errorf("parsing %s as hook_name:exec failed", v)
+		}
+		cmd := strings.Split(parts[1], " ")
+		exec, err := exec.LookPath(cmd[0])
+		if err != nil {
+			return hooks, fmt.Errorf("looking up exec path for %s failed: %v", cmd[0], err)
+		}
+		hook := specs.Hook{
+			Path: exec,
+		}
+		if len(cmd) > 1 {
+			hook.Args = cmd[:1]
+		}
+		switch parts[0] {
+		case "prestart":
+			hooks.Prestart = append(hooks.Prestart, hook)
+		case "poststart":
+			hooks.Poststart = append(hooks.Poststart, hook)
+		case "poststop":
+			hooks.Poststop = append(hooks.Poststop, hook)
+		default:
+			return hooks, fmt.Errorf("%s is not a valid hook, try 'prestart', 'poststart', or 'poststop'", parts[0])
+		}
+	}
+	return hooks, nil
+}
+
 func init() {
 	// Parse flags
-	flag.StringVar(&containerID, "id", "jessiscool", "container ID")
+	flag.StringVar(&containerID, "id", IMAGE, "container ID")
 	flag.StringVar(&console, "console", console, "the pty slave path for use with the container")
+	flag.StringVar(&pidFile, "pid-file", "", "specify the file to write the process id to")
 	flag.StringVar(&root, "root", defaultRoot, "root directory of container state, should be tmpfs")
 
+	flag.Var(&hookflags, "hook", "Hooks to prefill into spec file. (ex. --hook prestart:netns)")
+
+	flag.BoolVar(&allocateTty, "t", true, "allocate a tty for the container")
+	flag.BoolVar(&detach, "d", false, "detach from the container's process")
+	flag.BoolVar(&readonly, "read-only", false, "make container filesystem readonly")
+
 	flag.BoolVar(&version, "version", false, "print version and exit")
 	flag.BoolVar(&version, "v", false, "print version and exit (shorthand)")
-	flag.BoolVar(&debug, "d", false, "run in debug mode")
+	flag.BoolVar(&debug, "D", false, "run in debug mode")
 
 	flag.Usage = func() {
-		fmt.Fprint(os.Stderr, fmt.Sprintf(BANNER, VERSION, GITCOMMIT))
+		fmt.Fprint(os.Stderr, fmt.Sprintf(BANNER, IMAGE, IMAGESHA, VERSION, GITCOMMIT))
 		flag.PrintDefaults()
 	}
 
 	flag.Parse()
 
 	if version {
-		fmt.Printf("%s, commit: %s", VERSION, GITCOMMIT)
+		fmt.Printf("%s, commit: %s, image: %s, image digest: %s", VERSION, GITCOMMIT, IMAGE, IMAGESHA)
 		os.Exit(0)
 	}
@@ -72,41 +143,68 @@ func init() {
 	if debug {
 		logrus.SetLevel(logrus.DebugLevel)
 	}
+
+	// parse the hook flags
+	var err error
+	hooks, err = hookflags.ParseHooks()
+	if err != nil {
+		logrus.Fatal(err)
+	}
 }
 
+//go:generate go run generate.go
 func main() {
 	if len(os.Args) > 1 && os.Args[1] == "init" {
 		runInit()
 		return
 	}
 
-	if err := unpackRootfs(); err != nil {
+	notifySocket := os.Getenv("NOTIFY_SOCKET")
+	if notifySocket != "" {
+		setupSdNotify(spec, notifySocket)
+	}
+
+	// override the cmd in the spec with any args specified
+	if len(flag.Args()) > 0 {
+		spec.Process.Args = flag.Args()
+	}
+
+	// setup readonly fs in spec
+	spec.Root.Readonly = readonly
+
+	// setup tty in spec
+	spec.Process.Terminal = allocateTty
+
+	// pass in any hooks
+	spec.Hooks = hooks
+
+	// install the default apparmor profile
+	if apparmor.IsEnabled() {
+		if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil {
+			logrus.Warnf("AppArmor is enabled on the the system, but the profile (%s) could not be loaded", defaultApparmorProfile)
+		} else {
+			spec.Process.ApparmorProfile = defaultApparmorProfile
+		}
+	}
+
+	if err := unpackRootfs(spec); err != nil {
 		logrus.Fatal(err)
 	}
 
-	status, err := startContainer(spec, containerID)
+	status, err := startContainer(spec, containerID, pidFile, detach)
 	if err != nil {
 		logrus.Fatal(err)
 	}
 
+	if err := os.RemoveAll(defaultRootfsDir); err != nil {
+		logrus.Warnf("removing rootfs failed: %v", err)
+	}
+
 	// exit with the container's exit status
 	os.Exit(status)
 }
 
-func unpackRootfs() error {
-	data, err := base64.StdEncoding.DecodeString(DATA)
-	if err != nil {
-		return err
-	}
-	r := bytes.NewReader(data)
-	if err := os.Mkdir("container", 0755); err != nil {
-		return err
-	}
-	return archive.Untar(r, "container", nil)
-}
-
 func runInit() {
-	if len(os.Args) > 1 && os.Args[1] == "init" {
 	runtime.GOMAXPROCS(1)
 	runtime.LockOSThread()
 	factory, _ := libcontainer.New("")
@@ -116,5 +214,4 @@ func runInit() {
 		os.Exit(1)
 	}
 	panic("libcontainer: container init failed to exec")
-	}
 }
main_unix.go | 5 (new file)

// +build linux

package main

import _ "github.com/opencontainers/runc/libcontainer/nsenter"
rootfs_ops.go | 56 (new file)

package main

import (
	"bytes"
	"encoding/base64"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
	"github.com/opencontainers/runtime-spec/specs-go"
)

func unpackRootfs(spec *specs.Spec) error {
	data, err := base64.StdEncoding.DecodeString(DATA)
	if err != nil {
		return err
	}

	if len(spec.Linux.UIDMappings) > 0 && len(spec.Linux.GIDMappings) > 0 {
		if err := idtools.MkdirAs(defaultRootfsDir, 0755, int(spec.Linux.UIDMappings[0].HostID), int(spec.Linux.GIDMappings[0].HostID)); err != nil {
			return err
		}
	} else {
		if err := os.MkdirAll(defaultRootfsDir, 0755); err != nil {
			return err
		}
	}

	uidMaps := []idtools.IDMap{}
	gidMaps := []idtools.IDMap{}
	for _, u := range spec.Linux.UIDMappings {
		uidMaps = append(uidMaps, idtools.IDMap{
			ContainerID: int(u.ContainerID),
			HostID:      int(u.HostID),
			Size:        int(u.Size),
		})
	}

	for _, g := range spec.Linux.GIDMappings {
		gidMaps = append(gidMaps, idtools.IDMap{
			ContainerID: int(g.ContainerID),
			HostID:      int(g.HostID),
			Size:        int(g.Size),
		})
	}

	r := bytes.NewReader(data)
	if err := archive.Untar(r, defaultRootfsDir, &archive.TarOptions{
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	}); err != nil {
		return err
	}

	return nil
}
@@ -1,5 +1,3 @@
-// +build linux
-
 package main
 
 import (
spec.go | 57

@@ -1,6 +1,9 @@
 package main
 
 import (
+	"encoding/json"
+	"fmt"
+	"os"
 	"runtime"
 
 	"github.com/opencontainers/runtime-spec/specs-go"
@@ -30,9 +33,18 @@ var (
 		Cwd:             "/",
 		NoNewPrivileges: true,
 		Capabilities: []string{
-			"CAP_AUDIT_WRITE",
-			"CAP_KILL",
+			"CAP_CHOWN",
+			"CAP_DAC_OVERRIDE",
+			"CAP_FSETID",
+			"CAP_FOWNER",
+			"CAP_MKNOD",
+			"CAP_SETGID",
+			"CAP_SETUID",
+			"CAP_SETFCAP",
+			"CAP_SETPCAP",
 			"CAP_NET_BIND_SERVICE",
+			"CAP_KILL",
+			"CAP_AUDIT_WRITE",
 		},
 		Rlimits: []specs.Rlimit{
 			{
@@ -60,7 +72,7 @@ var (
 			Destination: "/dev/pts",
 			Type:        "devpts",
 			Source:      "devpts",
-			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
 		},
 		{
 			Destination: "/dev/shm",
@@ -84,10 +96,24 @@ var (
 			Destination: "/sys/fs/cgroup",
 			Type:        "cgroup",
 			Source:      "cgroup",
-			Options:     []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
+			Options:     []string{"nosuid", "noexec", "nodev", "relatime"},
 		},
 	},
 	Linux: specs.Linux{
+		UIDMappings: []specs.IDMapping{
+			{
+				HostID:      remappedUID,
+				ContainerID: 0,
+				Size:        46578392,
+			},
+		},
+		GIDMappings: []specs.IDMapping{
+			{
+				HostID:      remappedGID,
+				ContainerID: 0,
+				Size:        46578392,
+			},
+		},
 		MaskedPaths: []string{
 			"/proc/kcore",
 			"/proc/latency_stats",
@@ -118,11 +144,14 @@ var (
 			Type: "ipc",
 		},
 		{
-			Type: "uts",
+			Type: "network",
 		},
 		{
 			Type: "user",
 		},
+		{
+			Type: "uts",
+		},
 		{
 			Type: "mount",
 		},
@@ -131,3 +160,21 @@ var (
 		},
 	}
 )
+
+// loadSpec loads the specification from the provided path.
+// If the path is empty then the default path will be "config.json"
+func loadSpec(cPath string) (spec *specs.Spec, err error) {
+	cf, err := os.Open(cPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("JSON specification file %s not found", cPath)
+		}
+		return nil, err
+	}
+	defer cf.Close()
+
+	if err = json.NewDecoder(cf).Decode(&spec); err != nil {
+		return nil, err
+	}
+	return spec, nil
+}
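The UIDMappings/GIDMappings added above map container root (ID 0) onto the unprivileged host range starting at remappedUID/remappedGID (886432) with a size of 46578392 IDs. Inside a container created from this spec, the kernel would expose that mapping roughly like this (illustrative):

```console
$ cat /proc/self/uid_map
         0     886432   46578392
```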
tty.go | 2

@@ -1,5 +1,3 @@
-// +build linux
-
 package main
 
 import (
utils.go | 57

@@ -7,50 +7,68 @@ import (
 	"syscall"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/coreos/go-systemd/activation"
 	"github.com/opencontainers/runc/libcontainer"
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/specconv"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
-// startContainer starts the container. Returns the exit status or -1 and an
-// error. Signals sent to the current process will be forwarded to container.
-func startContainer(spec *specs.Spec, id string) (int, error) {
+var container libcontainer.Container
+
+func createContainer(id string, spec *specs.Spec) (libcontainer.Container, error) {
 	// create the libcontainer config
 	config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{
 		CgroupName:       id,
+		UseSystemdCgroup: false,
+		NoPivotRoot:      false,
 		Spec:             spec,
 	})
 	if err != nil {
-		return -1, err
+		return nil, err
 	}
 
 	if _, err := os.Stat(config.Rootfs); err != nil {
 		if os.IsNotExist(err) {
-			return -1, fmt.Errorf("rootfs (%q) does not exist", config.Rootfs)
+			return nil, fmt.Errorf("rootfs (%q) does not exist", config.Rootfs)
 		}
-		return -1, err
+		return nil, err
 	}
 
+	logrus.Debugf("loading factory")
 	factory, err := loadFactory()
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("creating factory")
+	return factory.Create(id, config)
+}
+
+// startContainer starts the container. Returns the exit status or -1 and an
+// error. Signals sent to the current process will be forwarded to container.
+func startContainer(spec *specs.Spec, id, pidFile string, detach bool) (int, error) {
+	container, err := createContainer(id, spec)
 	if err != nil {
 		return -1, err
 	}
 
-	ctr, err := factory.Create(id, config)
-	if err != nil {
-		return -1, err
+	// Support on-demand socket activation by passing file descriptors into the container init process.
+	listenFDs := []*os.File{}
+	if os.Getenv("LISTEN_FDS") != "" {
+		listenFDs = activation.Files(false)
 	}
 
 	r := &runner{
 		enableSubreaper: true,
 		shouldDestroy:   true,
-		container:       ctr,
+		container:       container,
 		console:         console,
-		detach:          false,
-		pidFile:         "",
-		listenFDs:       []*os.File{},
+		detach:          detach,
+		pidFile:         pidFile,
+		listenFDs:       listenFDs,
 	}
+	logrus.Debugf("running %#v", *r)
 	return r.run(&spec.Process)
 }
 
@@ -167,6 +185,7 @@ type runner struct {
 }
 
 func (r *runner) run(config *specs.Process) (int, error) {
+	logrus.Debugf("runner new process")
 	process, err := newProcess(*config)
 	if err != nil {
 		r.destroy()
@@ -176,22 +195,26 @@ func (r *runner) run(config *specs.Process) (int, error) {
 		process.Env = append(process.Env, fmt.Sprintf("LISTEN_FDS=%d", len(r.listenFDs)), "LISTEN_PID=1")
 		process.ExtraFiles = append(process.ExtraFiles, r.listenFDs...)
 	}
+	logrus.Debugf("runner hostuid")
 	rootuid, err := r.container.Config().HostUID()
 	if err != nil {
 		r.destroy()
 		return -1, err
 	}
+	logrus.Debugf("runner setupio")
 	tty, err := setupIO(process, rootuid, r.console, config.Terminal, r.detach)
 	if err != nil {
 		r.destroy()
 		return -1, err
 	}
 	handler := newSignalHandler(tty, r.enableSubreaper)
+	logrus.Debugf("container start, %#v", r.container)
 	if err := r.container.Start(process); err != nil {
 		r.destroy()
 		tty.Close()
 		return -1, err
 	}
+	logrus.Debugf("close post start")
 	if err := tty.ClosePostStart(); err != nil {
 		r.terminate(process)
 		r.destroy()
@@ -210,6 +233,7 @@ func (r *runner) run(config *specs.Process) (int, error) {
 		tty.Close()
 		return 0, nil
 	}
+	logrus.Debugf("forward handler")
 	status, err := handler.forward(process)
 	if err != nil {
 		r.terminate(process)
@@ -243,3 +267,10 @@ func createLibContainerRlimit(rlimit specs.Rlimit) (configs.Rlimit, error) {
 		Soft: uint64(rlimit.Soft),
 	}, nil
 }
+
+// If systemd is supporting sd_notify protocol, this function will add support
+// for sd_notify protocol from within the container.
+func setupSdNotify(spec *specs.Spec, notifySocket string) {
+	spec.Mounts = append(spec.Mounts, specs.Mount{Destination: notifySocket, Type: "bind", Source: notifySocket, Options: []string{"bind"}})
+	spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notifySocket))
+}
21
vendor/github.com/Sirupsen/logrus/LICENSE
generated
vendored
Normal file
21
vendor/github.com/Sirupsen/logrus/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Simon Eskildsen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
26
vendor/github.com/Sirupsen/logrus/doc.go
generated
vendored
Normal file
26
vendor/github.com/Sirupsen/logrus/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
/*
|
||||||
|
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||||
|
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"number": 1,
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
|
||||||
|
Output:
|
||||||
|
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||||
|
|
||||||
|
For a full guide visit https://github.com/Sirupsen/logrus
|
||||||
|
*/
|
||||||
|
package logrus
|
264
vendor/github.com/Sirupsen/logrus/entry.go
generated
vendored
Normal file
264
vendor/github.com/Sirupsen/logrus/entry.go
generated
vendored
Normal file
|
@ -0,0 +1,264 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Defines the key when adding errors using WithError.
|
||||||
|
var ErrorKey = "error"
|
||||||
|
|
||||||
|
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||||
|
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||||
|
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||||
|
// passed around as much as you wish to avoid field duplication.
|
||||||
|
type Entry struct {
|
||||||
|
Logger *Logger
|
||||||
|
|
||||||
|
// Contains all the fields set by the user.
|
||||||
|
Data Fields
|
||||||
|
|
||||||
|
// Time at which the log entry was created
|
||||||
|
Time time.Time
|
||||||
|
|
||||||
|
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||||
|
Level Level
|
||||||
|
|
||||||
|
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEntry(logger *Logger) *Entry {
|
||||||
|
return &Entry{
|
||||||
|
Logger: logger,
|
||||||
|
// Default is three fields, give a little extra room
|
||||||
|
Data: make(Fields, 5),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a reader for the entry, which is a proxy to the formatter.
|
||||||
|
func (entry *Entry) Reader() (*bytes.Buffer, error) {
|
||||||
|
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||||
|
return bytes.NewBuffer(serialized), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the string representation from the reader and ultimately the
|
||||||
|
// formatter.
|
||||||
|
func (entry *Entry) String() (string, error) {
|
||||||
|
reader, err := entry.Reader()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return reader.String(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||||
|
func (entry *Entry) WithError(err error) *Entry {
|
||||||
|
return entry.WithField(ErrorKey, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a single field to the Entry.
|
||||||
|
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||||
|
return entry.WithFields(Fields{key: value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a map of fields to the Entry.
|
||||||
|
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||||
|
data := make(Fields, len(entry.Data)+len(fields))
|
||||||
|
for k, v := range entry.Data {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range fields {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
return &Entry{Logger: entry.Logger, Data: data}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is not declared with a pointer value because otherwise
|
||||||
|
// race conditions will occur when using multiple goroutines
|
||||||
|
func (entry Entry) log(level Level, msg string) {
|
||||||
|
entry.Time = time.Now()
|
||||||
|
entry.Level = level
|
||||||
|
entry.Message = msg
|
||||||
|
|
||||||
|
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||||
|
entry.Logger.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
reader, err := entry.Reader()
|
||||||
|
if err != nil {
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||||
|
entry.Logger.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
entry.Logger.mu.Lock()
|
||||||
|
defer entry.Logger.mu.Unlock()
|
||||||
|
|
||||||
|
_, err = io.Copy(entry.Logger.Out, reader)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// To avoid Entry#log() returning a value that only would make sense for
|
||||||
|
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||||
|
// directly here.
|
||||||
|
if level <= PanicLevel {
|
||||||
|
panic(&entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Debug(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Print(args ...interface{}) {
|
||||||
|
entry.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Info(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warn(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warning(args ...interface{}) {
|
||||||
|
entry.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Error(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatal(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panic(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
panic(fmt.Sprint(args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry Printf family functions
|
||||||
|
|
||||||
|
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.Debug(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.Info(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||||
|
entry.Infof(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.Warn(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||||
|
entry.Warnf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.Error(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.Fatal(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.Panic(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry Println family functions
|
||||||
|
|
||||||
|
func (entry *Entry) Debugln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= DebugLevel {
|
||||||
|
entry.Debug(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Infoln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= InfoLevel {
|
||||||
|
entry.Info(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Println(args ...interface{}) {
|
||||||
|
entry.Infoln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warnln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= WarnLevel {
|
||||||
|
entry.Warn(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Warningln(args ...interface{}) {
|
||||||
|
entry.Warnln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Errorln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= ErrorLevel {
|
||||||
|
entry.Error(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= FatalLevel {
|
||||||
|
entry.Fatal(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Panicln(args ...interface{}) {
|
||||||
|
if entry.Logger.Level >= PanicLevel {
|
||||||
|
entry.Panic(entry.sprintlnn(args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||||
|
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||||
|
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||||
|
// string allocation, we do the simplest thing.
|
||||||
|
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||||
|
msg := fmt.Sprintln(args...)
|
||||||
|
return msg[:len(msg)-1]
|
||||||
|
}
|
50
vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
generated
vendored
Normal file
50
vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
log.Formatter = new(logrus.JSONFormatter)
|
||||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||||
|
log.Level = logrus.DebugLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
defer func() {
|
||||||
|
err := recover()
|
||||||
|
if err != nil {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"err": err,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"number": 8,
|
||||||
|
}).Debug("Started observing beach")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"temperature": -4,
|
||||||
|
}).Debug("Temperature changes")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "orca",
|
||||||
|
"size": 9009,
|
||||||
|
}).Panic("It's over 9000!")
|
||||||
|
}
|
30
vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
generated
vendored
Normal file
30
vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||||
|
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
}
|
193
vendor/github.com/Sirupsen/logrus/exported.go
generated
vendored
Normal file
193
vendor/github.com/Sirupsen/logrus/exported.go
generated
vendored
Normal file
|
@ -0,0 +1,193 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// std is the name of the standard logger in stdlib `log`
|
||||||
|
std = New()
|
||||||
|
)
|
||||||
|
|
||||||
|
func StandardLogger() *Logger {
|
||||||
|
return std
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetOutput sets the standard logger output.
|
||||||
|
func SetOutput(out io.Writer) {
|
||||||
|
std.mu.Lock()
|
||||||
|
defer std.mu.Unlock()
|
||||||
|
std.Out = out
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFormatter sets the standard logger formatter.
|
||||||
|
func SetFormatter(formatter Formatter) {
|
||||||
|
std.mu.Lock()
|
||||||
|
defer std.mu.Unlock()
|
||||||
|
std.Formatter = formatter
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLevel sets the standard logger level.
|
||||||
|
func SetLevel(level Level) {
|
||||||
|
std.mu.Lock()
|
||||||
|
defer std.mu.Unlock()
|
||||||
|
std.Level = level
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLevel returns the standard logger level.
|
||||||
|
func GetLevel() Level {
|
||||||
|
std.mu.Lock()
|
||||||
|
defer std.mu.Unlock()
|
||||||
|
return std.Level
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddHook adds a hook to the standard logger hooks.
|
||||||
|
func AddHook(hook Hook) {
|
||||||
|
std.mu.Lock()
|
||||||
|
defer std.mu.Unlock()
|
||||||
|
std.Hooks.Add(hook)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||||
|
func WithError(err error) *Entry {
|
||||||
|
return std.WithField(ErrorKey, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithField creates an entry from the standard logger and adds a field to
|
||||||
|
// it. If you want multiple fields, use `WithFields`.
|
||||||
|
//
|
||||||
|
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||||
|
// or Panic on the Entry it returns.
|
||||||
|
func WithField(key string, value interface{}) *Entry {
|
||||||
|
return std.WithField(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFields creates an entry from the standard logger and adds multiple
|
||||||
|
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||||
|
// once for each field.
|
||||||
|
//
|
||||||
|
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||||
|
// or Panic on the Entry it returns.
|
||||||
|
func WithFields(fields Fields) *Entry {
|
||||||
|
return std.WithFields(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug logs a message at level Debug on the standard logger.
|
||||||
|
func Debug(args ...interface{}) {
|
||||||
|
std.Debug(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print logs a message at level Info on the standard logger.
|
||||||
|
func Print(args ...interface{}) {
|
||||||
|
std.Print(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info logs a message at level Info on the standard logger.
|
||||||
|
func Info(args ...interface{}) {
|
||||||
|
std.Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warn logs a message at level Warn on the standard logger.
|
||||||
|
func Warn(args ...interface{}) {
|
||||||
|
std.Warn(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warning logs a message at level Warn on the standard logger.
|
||||||
|
func Warning(args ...interface{}) {
|
||||||
|
std.Warning(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error logs a message at level Error on the standard logger.
|
||||||
|
func Error(args ...interface{}) {
|
||||||
|
std.Error(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panic logs a message at level Panic on the standard logger.
|
||||||
|
func Panic(args ...interface{}) {
|
||||||
|
std.Panic(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatal logs a message at level Fatal on the standard logger.
|
||||||
|
func Fatal(args ...interface{}) {
|
||||||
|
std.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugf logs a message at level Debug on the standard logger.
|
||||||
|
func Debugf(format string, args ...interface{}) {
|
||||||
|
std.Debugf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf logs a message at level Info on the standard logger.
|
||||||
|
func Printf(format string, args ...interface{}) {
|
||||||
|
std.Printf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infof logs a message at level Info on the standard logger.
|
||||||
|
func Infof(format string, args ...interface{}) {
|
||||||
|
std.Infof(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warnf logs a message at level Warn on the standard logger.
|
||||||
|
func Warnf(format string, args ...interface{}) {
|
||||||
|
std.Warnf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warningf logs a message at level Warn on the standard logger.
|
||||||
|
func Warningf(format string, args ...interface{}) {
|
||||||
|
std.Warningf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorf logs a message at level Error on the standard logger.
|
||||||
|
func Errorf(format string, args ...interface{}) {
|
||||||
|
std.Errorf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panicf logs a message at level Panic on the standard logger.
|
||||||
|
func Panicf(format string, args ...interface{}) {
|
||||||
|
std.Panicf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalf logs a message at level Fatal on the standard logger.
|
||||||
|
func Fatalf(format string, args ...interface{}) {
|
||||||
|
std.Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debugln logs a message at level Debug on the standard logger.
|
||||||
|
func Debugln(args ...interface{}) {
|
||||||
|
std.Debugln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println logs a message at level Info on the standard logger.
|
||||||
|
func Println(args ...interface{}) {
|
||||||
|
std.Println(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Infoln logs a message at level Info on the standard logger.
|
||||||
|
func Infoln(args ...interface{}) {
|
||||||
|
std.Infoln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warnln logs a message at level Warn on the standard logger.
|
||||||
|
func Warnln(args ...interface{}) {
|
||||||
|
std.Warnln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warningln logs a message at level Warn on the standard logger.
|
||||||
|
func Warningln(args ...interface{}) {
|
||||||
|
std.Warningln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorln logs a message at level Error on the standard logger.
|
||||||
|
func Errorln(args ...interface{}) {
|
||||||
|
std.Errorln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Panicln logs a message at level Panic on the standard logger.
|
||||||
|
func Panicln(args ...interface{}) {
|
||||||
|
std.Panicln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalln logs a message at level Fatal on the standard logger.
|
||||||
|
func Fatalln(args ...interface{}) {
|
||||||
|
std.Fatalln(args...)
|
||||||
|
}
|
48
vendor/github.com/Sirupsen/logrus/formatter.go
generated
vendored
Normal file
48
vendor/github.com/Sirupsen/logrus/formatter.go
generated
vendored
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const DefaultTimestampFormat = time.RFC3339
|
||||||
|
|
||||||
|
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||||
|
// `Entry`. It exposes all the fields, including the default ones:
|
||||||
|
//
|
||||||
|
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||||
|
// * `entry.Data["time"]`. The timestamp.
|
||||||
|
// * `entry.Data["level"]. The level the entry was logged at.
|
||||||
|
//
|
||||||
|
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||||
|
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||||
|
// logged to `logger.Out`.
|
||||||
|
type Formatter interface {
|
||||||
|
Format(*Entry) ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||||
|
// dumping it. If this code wasn't there doing:
|
||||||
|
//
|
||||||
|
// logrus.WithField("level", 1).Info("hello")
|
||||||
|
//
|
||||||
|
// Would just silently drop the user provided level. Instead with this code
|
||||||
|
// it'll logged as:
|
||||||
|
//
|
||||||
|
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||||
|
//
|
||||||
|
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||||
|
// avoid code duplication between the two default formatters.
|
||||||
|
func prefixFieldClashes(data Fields) {
|
||||||
|
_, ok := data["time"]
|
||||||
|
if ok {
|
||||||
|
data["fields.time"] = data["time"]
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = data["msg"]
|
||||||
|
if ok {
|
||||||
|
data["fields.msg"] = data["msg"]
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = data["level"]
|
||||||
|
if ok {
|
||||||
|
data["fields.level"] = data["level"]
|
||||||
|
}
|
||||||
|
}
|
61
vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
generated
vendored
Normal file
61
vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
generated
vendored
Normal file
|
@ -0,0 +1,61 @@
|
||||||
|
package logstash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Formatter generates json in logstash format.
|
||||||
|
// Logstash site: http://logstash.net/
|
||||||
|
type LogstashFormatter struct {
|
||||||
|
Type string // if not empty use for logstash type field.
|
||||||
|
|
||||||
|
// TimestampFormat sets the format used for timestamps.
|
||||||
|
TimestampFormat string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
|
||||||
|
fields := make(logrus.Fields)
|
||||||
|
for k, v := range entry.Data {
|
||||||
|
fields[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
fields["@version"] = 1
|
||||||
|
|
||||||
|
if f.TimestampFormat == "" {
|
||||||
|
f.TimestampFormat = logrus.DefaultTimestampFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
fields["@timestamp"] = entry.Time.Format(f.TimestampFormat)
|
||||||
|
|
||||||
|
// set message field
|
||||||
|
v, ok := entry.Data["message"]
|
||||||
|
if ok {
|
||||||
|
fields["fields.message"] = v
|
||||||
|
}
|
||||||
|
fields["message"] = entry.Message
|
||||||
|
|
||||||
|
// set level field
|
||||||
|
v, ok = entry.Data["level"]
|
||||||
|
if ok {
|
||||||
|
fields["fields.level"] = v
|
||||||
|
}
|
||||||
|
fields["level"] = entry.Level.String()
|
||||||
|
|
||||||
|
// set type field
|
||||||
|
if f.Type != "" {
|
||||||
|
v, ok = entry.Data["type"]
|
||||||
|
if ok {
|
||||||
|
fields["fields.type"] = v
|
||||||
|
}
|
||||||
|
fields["type"] = f.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
serialized, err := json.Marshal(fields)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||||
|
}
|
||||||
|
return append(serialized, '\n'), nil
|
||||||
|
}
|
34 vendor/github.com/Sirupsen/logrus/hooks.go generated vendored Normal file
@@ -0,0 +1,34 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
    Levels() []Level
    Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
    for _, level := range hook.Levels() {
        hooks[level] = append(hooks[level], hook)
    }
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
    for _, hook := range hooks[level] {
        if err := hook.Fire(entry); err != nil {
            return err
        }
    }

    return nil
}
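
A minimal sketch of how the Hook interface above is satisfied in practice (illustrative only, not part of the vendored files; the errorCounterHook type and its field are invented for the example):

package main

import "github.com/Sirupsen/logrus"

// errorCounterHook counts how many error-or-worse entries were logged.
type errorCounterHook struct {
    count int
}

// Levels restricts the hook to error, fatal and panic entries.
func (h *errorCounterHook) Levels() []logrus.Level {
    return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire is called once for every matching entry before it is formatted.
func (h *errorCounterHook) Fire(e *logrus.Entry) error {
    h.count++
    return nil
}

func main() {
    logrus.AddHook(&errorCounterHook{}) // registers on the global logger, as hooks/test does
    logrus.WithField("container", "alpine").Error("rootfs extraction failed")
}
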
54 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go generated vendored Normal file
@@ -0,0 +1,54 @@
// +build !windows,!nacl,!plan9

package logrus_syslog

import (
    "fmt"
    "github.com/Sirupsen/logrus"
    "log/syslog"
    "os"
)

// SyslogHook to send logs via syslog.
type SyslogHook struct {
    Writer        *syslog.Writer
    SyslogNetwork string
    SyslogRaddr   string
}

// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
    w, err := syslog.Dial(network, raddr, priority, tag)
    return &SyslogHook{w, network, raddr}, err
}

func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
    line, err := entry.String()
    if err != nil {
        fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
        return err
    }

    switch entry.Level {
    case logrus.PanicLevel:
        return hook.Writer.Crit(line)
    case logrus.FatalLevel:
        return hook.Writer.Crit(line)
    case logrus.ErrorLevel:
        return hook.Writer.Err(line)
    case logrus.WarnLevel:
        return hook.Writer.Warning(line)
    case logrus.InfoLevel:
        return hook.Writer.Info(line)
    case logrus.DebugLevel:
        return hook.Writer.Debug(line)
    default:
        return nil
    }
}

func (hook *SyslogHook) Levels() []logrus.Level {
    return logrus.AllLevels
}
67 vendor/github.com/Sirupsen/logrus/hooks/test/test.go generated vendored Normal file
@@ -0,0 +1,67 @@
package test

import (
    "io/ioutil"

    "github.com/Sirupsen/logrus"
)

// test.Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
    Entries []*logrus.Entry
}

// Installs a test hook for the global logger.
func NewGlobal() *Hook {

    hook := new(Hook)
    logrus.AddHook(hook)

    return hook

}

// Installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {

    hook := new(Hook)
    logger.Hooks.Add(hook)

    return hook

}

// Creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {

    logger := logrus.New()
    logger.Out = ioutil.Discard

    return logger, NewLocal(logger)

}

func (t *Hook) Fire(e *logrus.Entry) error {
    t.Entries = append(t.Entries, e)
    return nil
}

func (t *Hook) Levels() []logrus.Level {
    return logrus.AllLevels
}

// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() (l *logrus.Entry) {

    if i := len(t.Entries) - 1; i < 0 {
        return nil
    } else {
        return t.Entries[i]
    }

}

// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
    t.Entries = make([]*logrus.Entry, 0)
}
41 vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored Normal file
@@ -0,0 +1,41 @@
package logrus

import (
    "encoding/json"
    "fmt"
)

type JSONFormatter struct {
    // TimestampFormat sets the format used for marshaling timestamps.
    TimestampFormat string
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
    data := make(Fields, len(entry.Data)+3)
    for k, v := range entry.Data {
        switch v := v.(type) {
        case error:
            // Otherwise errors are ignored by `encoding/json`
            // https://github.com/Sirupsen/logrus/issues/137
            data[k] = v.Error()
        default:
            data[k] = v
        }
    }
    prefixFieldClashes(data)

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }

    data["time"] = entry.Time.Format(timestampFormat)
    data["msg"] = entry.Message
    data["level"] = entry.Level.String()

    serialized, err := json.Marshal(data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
    return append(serialized, '\n'), nil
}
212 vendor/github.com/Sirupsen/logrus/logger.go generated vendored Normal file
@@ -0,0 +1,212 @@
package logrus

import (
    "io"
    "os"
    "sync"
)

type Logger struct {
    // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
    // file, or leave it default which is `os.Stderr`. You can also set this to
    // something more adventorous, such as logging to Kafka.
    Out io.Writer
    // Hooks for the logger instance. These allow firing events based on logging
    // levels and log entries. For example, to send errors to an error tracking
    // service, log to StatsD or dump the core on fatal errors.
    Hooks LevelHooks
    // All log entries pass through the formatter before logged to Out. The
    // included formatters are `TextFormatter` and `JSONFormatter` for which
    // TextFormatter is the default. In development (when a TTY is attached) it
    // logs with colors, but to a file it wouldn't. You can easily implement your
    // own that implements the `Formatter` interface, see the `README` or included
    // formatters for examples.
    Formatter Formatter
    // The logging level the logger should log at. This is typically (and defaults
    // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
    // logged. `logrus.Debug` is useful in
    Level Level
    // Used to sync writing to the log.
    mu sync.Mutex
}

// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//    var log = &Logger{
//      Out: os.Stderr,
//      Formatter: new(JSONFormatter),
//      Hooks: make(LevelHooks),
//      Level: logrus.DebugLevel,
//    }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
    return &Logger{
        Out:       os.Stderr,
        Formatter: new(TextFormatter),
        Hooks:     make(LevelHooks),
        Level:     InfoLevel,
    }
}

// Adds a field to the log entry, note that you it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
    return NewEntry(logger).WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
    return NewEntry(logger).WithFields(fields)
}

// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
    return NewEntry(logger).WithError(err)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugf(format, args...)
    }
}

func (logger *Logger) Infof(format string, args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infof(format, args...)
    }
}

func (logger *Logger) Printf(format string, args ...interface{}) {
    NewEntry(logger).Printf(format, args...)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorf(format, args...)
    }
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalf(format, args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicf(format, args...)
    }
}

func (logger *Logger) Debug(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debug(args...)
    }
}

func (logger *Logger) Info(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Info(args...)
    }
}

func (logger *Logger) Print(args ...interface{}) {
    NewEntry(logger).Info(args...)
}

func (logger *Logger) Warn(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Warning(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Error(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Error(args...)
    }
}

func (logger *Logger) Fatal(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatal(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panic(args...)
    }
}

func (logger *Logger) Debugln(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugln(args...)
    }
}

func (logger *Logger) Infoln(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infoln(args...)
    }
}

func (logger *Logger) Println(args ...interface{}) {
    NewEntry(logger).Println(args...)
}

func (logger *Logger) Warnln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Warningln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Errorln(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorln(args...)
    }
}

func (logger *Logger) Fatalln(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalln(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicln(args...)
    }
}
143 vendor/github.com/Sirupsen/logrus/logrus.go generated vendored Normal file
@@ -0,0 +1,143 @@
package logrus

import (
    "fmt"
    "log"
    "strings"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
    switch level {
    case DebugLevel:
        return "debug"
    case InfoLevel:
        return "info"
    case WarnLevel:
        return "warning"
    case ErrorLevel:
        return "error"
    case FatalLevel:
        return "fatal"
    case PanicLevel:
        return "panic"
    }

    return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
    switch strings.ToLower(lvl) {
    case "panic":
        return PanicLevel, nil
    case "fatal":
        return FatalLevel, nil
    case "error":
        return ErrorLevel, nil
    case "warn", "warning":
        return WarnLevel, nil
    case "info":
        return InfoLevel, nil
    case "debug":
        return DebugLevel, nil
    }

    var l Level
    return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// A constant exposing all logging levels
var AllLevels = []Level{
    PanicLevel,
    FatalLevel,
    ErrorLevel,
    WarnLevel,
    InfoLevel,
    DebugLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
    // PanicLevel level, highest level of severity. Logs and then calls panic with the
    // message passed to Debug, Info, ...
    PanicLevel Level = iota
    // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
    // logging level is set to Panic.
    FatalLevel
    // ErrorLevel level. Logs. Used for errors that should definitely be noted.
    // Commonly used for hooks to send errors to an error tracking service.
    ErrorLevel
    // WarnLevel level. Non-critical entries that deserve eyes.
    WarnLevel
    // InfoLevel level. General operational entries about what's going on inside the
    // application.
    InfoLevel
    // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
    DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
    _ StdLogger = &log.Logger{}
    _ StdLogger = &Entry{}
    _ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
    Print(...interface{})
    Printf(string, ...interface{})
    Println(...interface{})

    Fatal(...interface{})
    Fatalf(string, ...interface{})
    Fatalln(...interface{})

    Panic(...interface{})
    Panicf(string, ...interface{})
    Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
    WithField(key string, value interface{}) *Entry
    WithFields(fields Fields) *Entry
    WithError(err error) *Entry

    Debugf(format string, args ...interface{})
    Infof(format string, args ...interface{})
    Printf(format string, args ...interface{})
    Warnf(format string, args ...interface{})
    Warningf(format string, args ...interface{})
    Errorf(format string, args ...interface{})
    Fatalf(format string, args ...interface{})
    Panicf(format string, args ...interface{})

    Debug(args ...interface{})
    Info(args ...interface{})
    Print(args ...interface{})
    Warn(args ...interface{})
    Warning(args ...interface{})
    Error(args ...interface{})
    Fatal(args ...interface{})
    Panic(args ...interface{})

    Debugln(args ...interface{})
    Infoln(args ...interface{})
    Println(args ...interface{})
    Warnln(args ...interface{})
    Warningln(args ...interface{})
    Errorln(args ...interface{})
    Fatalln(args ...interface{})
    Panicln(args ...interface{})
}
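
ParseLevel and the Level constants above are typically used to turn a user-supplied verbosity string into a logger setting. A rough sketch, assuming the "debug" string would come from a flag or environment variable (it is not taken from this commit):

package main

import "github.com/Sirupsen/logrus"

func main() {
    // Turn a string such as "debug" into a logrus.Level using ParseLevel above.
    lvl, err := logrus.ParseLevel("debug")
    if err != nil {
        logrus.Fatalf("invalid log level: %v", err)
    }

    log := logrus.New() // os.Stderr, TextFormatter and InfoLevel by default
    log.Level = lvl     // raise verbosity so Debug* calls are emitted
    log.Debugf("logging at %s", lvl.String())
}
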
9 vendor/github.com/Sirupsen/logrus/terminal_bsd.go generated vendored Normal file
@@ -0,0 +1,9 @@
// +build darwin freebsd openbsd netbsd dragonfly

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
12 vendor/github.com/Sirupsen/logrus/terminal_linux.go generated vendored Normal file
@@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
21 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go generated vendored Normal file
@@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly

package logrus

import (
    "syscall"
    "unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stderr
    var termios Termios
    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
    return err == 0
}
15 vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build solaris

package logrus

import (
    "os"

    "golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
    _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
    return err == nil
}
27 vendor/github.com/Sirupsen/logrus/terminal_windows.go generated vendored Normal file
@@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package logrus

import (
    "syscall"
    "unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
    procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stderr
    var st uint32
    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
    return r != 0 && e == 0
}
161 vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored Normal file
@@ -0,0 +1,161 @@
package logrus

import (
    "bytes"
    "fmt"
    "runtime"
    "sort"
    "strings"
    "time"
)

const (
    nocolor = 0
    red     = 31
    green   = 32
    yellow  = 33
    blue    = 34
    gray    = 37
)

var (
    baseTimestamp time.Time
    isTerminal    bool
)

func init() {
    baseTimestamp = time.Now()
    isTerminal = IsTerminal()
}

func miniTS() int {
    return int(time.Since(baseTimestamp) / time.Second)
}

type TextFormatter struct {
    // Set to true to bypass checking for a TTY before outputting colors.
    ForceColors bool

    // Force disabling colors.
    DisableColors bool

    // Disable timestamp logging. useful when output is redirected to logging
    // system that already adds timestamps.
    DisableTimestamp bool

    // Enable logging the full timestamp when a TTY is attached instead of just
    // the time passed since beginning of execution.
    FullTimestamp bool

    // TimestampFormat to use for display when a full timestamp is printed
    TimestampFormat string

    // The fields are sorted by default for a consistent output. For applications
    // that log extremely frequently and don't use the JSON formatter this may not
    // be desired.
    DisableSorting bool
}

func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
    var keys []string = make([]string, 0, len(entry.Data))
    for k := range entry.Data {
        keys = append(keys, k)
    }

    if !f.DisableSorting {
        sort.Strings(keys)
    }

    b := &bytes.Buffer{}

    prefixFieldClashes(entry.Data)

    isColorTerminal := isTerminal && (runtime.GOOS != "windows")
    isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }
    if isColored {
        f.printColored(b, entry, keys, timestampFormat)
    } else {
        if !f.DisableTimestamp {
            f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
        }
        f.appendKeyValue(b, "level", entry.Level.String())
        if entry.Message != "" {
            f.appendKeyValue(b, "msg", entry.Message)
        }
        for _, key := range keys {
            f.appendKeyValue(b, key, entry.Data[key])
        }
    }

    b.WriteByte('\n')
    return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
    var levelColor int
    switch entry.Level {
    case DebugLevel:
        levelColor = gray
    case WarnLevel:
        levelColor = yellow
    case ErrorLevel, FatalLevel, PanicLevel:
        levelColor = red
    default:
        levelColor = blue
    }

    levelText := strings.ToUpper(entry.Level.String())[0:4]

    if !f.FullTimestamp {
        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
    } else {
        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
    }
    for _, k := range keys {
        v := entry.Data[k]
        fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
    }
}

func needsQuoting(text string) bool {
    for _, ch := range text {
        if !((ch >= 'a' && ch <= 'z') ||
            (ch >= 'A' && ch <= 'Z') ||
            (ch >= '0' && ch <= '9') ||
            ch == '-' || ch == '.') {
            return false
        }
    }
    return true
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {

    b.WriteString(key)
    b.WriteByte('=')

    switch value := value.(type) {
    case string:
        if needsQuoting(value) {
            b.WriteString(value)
        } else {
            fmt.Fprintf(b, "%q", value)
        }
    case error:
        errmsg := value.Error()
        if needsQuoting(errmsg) {
            b.WriteString(errmsg)
        } else {
            fmt.Fprintf(b, "%q", value)
        }
    default:
        fmt.Fprint(b, value)
    }

    b.WriteByte(' ')
}
31 vendor/github.com/Sirupsen/logrus/writer.go generated vendored Normal file
@@ -0,0 +1,31 @@
package logrus

import (
    "bufio"
    "io"
    "runtime"
)

func (logger *Logger) Writer() *io.PipeWriter {
    reader, writer := io.Pipe()

    go logger.writerScanner(reader)
    runtime.SetFinalizer(writer, writerFinalizer)

    return writer
}

func (logger *Logger) writerScanner(reader *io.PipeReader) {
    scanner := bufio.NewScanner(reader)
    for scanner.Scan() {
        logger.Print(scanner.Text())
    }
    if err := scanner.Err(); err != nil {
        logger.Errorf("Error while reading from Writer: %s", err)
    }
    reader.Close()
}

func writerFinalizer(writer *io.PipeWriter) {
    writer.Close()
}
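
A common use of the Writer method above is routing the standard library logger through logrus; a brief illustrative sketch (not part of the vendored code):

package main

import (
    "log"

    "github.com/Sirupsen/logrus"
)

func main() {
    logger := logrus.New()

    // Lines written through the stdlib logger are scanned one by one
    // and re-emitted by logrus at Info level via writerScanner above.
    log.SetOutput(logger.Writer())
    log.Println("this line ends up as a logrus entry")
}
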
191 vendor/github.com/coreos/go-systemd/activation/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
52 vendor/github.com/coreos/go-systemd/activation/files.go generated vendored Normal file
@@ -0,0 +1,52 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package activation implements primitives for systemd socket activation.
package activation

import (
    "os"
    "strconv"
    "syscall"
)

// based on: https://gist.github.com/alberts/4640792
const (
    listenFdsStart = 3
)

func Files(unsetEnv bool) []*os.File {
    if unsetEnv {
        defer os.Unsetenv("LISTEN_PID")
        defer os.Unsetenv("LISTEN_FDS")
    }

    pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
    if err != nil || pid != os.Getpid() {
        return nil
    }

    nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
    if err != nil || nfds == 0 {
        return nil
    }

    files := make([]*os.File, 0, nfds)
    for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
        syscall.CloseOnExec(fd)
        files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
    }

    return files
}
62 vendor/github.com/coreos/go-systemd/activation/listeners.go generated vendored Normal file
@@ -0,0 +1,62 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package activation

import (
    "crypto/tls"
    "net"
)

// Listeners returns a slice containing a net.Listener for each matching socket type
// passed to this process.
//
// The order of the file descriptors is preserved in the returned slice.
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}
func Listeners(unsetEnv bool) ([]net.Listener, error) {
    files := Files(unsetEnv)
    listeners := make([]net.Listener, len(files))

    for i, f := range files {
        if pc, err := net.FileListener(f); err == nil {
            listeners[i] = pc
        }
    }
    return listeners, nil
}

// TLSListeners returns a slice containing a net.listener for each matching TCP socket type
// passed to this process.
// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig.
func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) {
    listeners, err := Listeners(unsetEnv)

    if listeners == nil || err != nil {
        return nil, err
    }

    if tlsConfig != nil && err == nil {
        tlsConfig.NextProtos = []string{"http/1.1"}

        for i, l := range listeners {
            // Activate TLS only for TCP sockets
            if l.Addr().Network() == "tcp" {
                listeners[i] = tls.NewListener(l, tlsConfig)
            }
        }
    }

    return listeners, err
}
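
A rough sketch of how a daemon consumes the activation package above (illustrative only, not part of this commit; the HTTP handler is an assumed stand-in for a real server):

package main

import (
    "log"
    "net/http"

    "github.com/coreos/go-systemd/activation"
)

func main() {
    // Listeners wraps every file descriptor systemd handed over via LISTEN_FDS.
    listeners, err := activation.Listeners(true) // true: scrub LISTEN_PID/LISTEN_FDS from the env
    if err != nil || len(listeners) == 0 {
        log.Fatal("no systemd-activated sockets available")
    }

    // Serve plain HTTP on the first activated socket.
    log.Fatal(http.Serve(listeners[0], http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok\n"))
    })))
}
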
37 vendor/github.com/coreos/go-systemd/activation/packetconns.go generated vendored Normal file
@@ -0,0 +1,37 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package activation

import (
    "net"
)

// PacketConns returns a slice containing a net.PacketConn for each matching socket type
// passed to this process.
//
// The order of the file descriptors is preserved in the returned slice.
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn}
func PacketConns(unsetEnv bool) ([]net.PacketConn, error) {
    files := Files(unsetEnv)
    conns := make([]net.PacketConn, len(files))

    for i, f := range files {
        if pc, err := net.FilePacketConn(f); err == nil {
            conns[i] = pc
        }
    }
    return conns, nil
}
191 vendor/github.com/coreos/go-systemd/dbus/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

[Remainder of the file is the same Apache License, Version 2.0 text as vendor/github.com/coreos/go-systemd/activation/LICENSE above.]
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
198
vendor/github.com/coreos/go-systemd/dbus/dbus.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
package dbus

import (
    "fmt"
    "os"
    "strconv"
    "strings"
    "sync"

    "github.com/godbus/dbus"
)

const (
    alpha        = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
    num          = `0123456789`
    alphanum     = alpha + num
    signalBuffer = 100
)

// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
func needsEscape(i int, b byte) bool {
    // Escape everything that is not a-z-A-Z-0-9
    // Also escape 0-9 if it's the first character
    return strings.IndexByte(alphanum, b) == -1 ||
        (i == 0 && strings.IndexByte(num, b) != -1)
}

// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
// rules that systemd uses for serializing special characters.
func PathBusEscape(path string) string {
    // Special case the empty string
    if len(path) == 0 {
        return "_"
    }
    n := []byte{}
    for i := 0; i < len(path); i++ {
        c := path[i]
        if needsEscape(i, c) {
            e := fmt.Sprintf("_%x", c)
            n = append(n, []byte(e)...)
        } else {
            n = append(n, c)
        }
    }
    return string(n)
}

// Conn is a connection to systemd's dbus endpoint.
type Conn struct {
    // sysconn/sysobj are only used to call dbus methods
    sysconn *dbus.Conn
    sysobj  dbus.BusObject

    // sigconn/sigobj are only used to receive dbus signals
    sigconn *dbus.Conn
    sigobj  dbus.BusObject

    jobListener struct {
        jobs map[dbus.ObjectPath]chan<- string
        sync.Mutex
    }
    subscriber struct {
        updateCh chan<- *SubStateUpdate
        errCh    chan<- error
        sync.Mutex
        ignore      map[dbus.ObjectPath]int64
        cleanIgnore int64
    }
}

// New establishes a connection to the system bus and authenticates.
// Callers should call Close() when done with the connection.
func New() (*Conn, error) {
    return newConnection(func() (*dbus.Conn, error) {
        return dbusAuthHelloConnection(dbus.SystemBusPrivate)
    })
}

// NewUserConnection establishes a connection to the session bus and
// authenticates. This can be used to connect to systemd user instances.
// Callers should call Close() when done with the connection.
func NewUserConnection() (*Conn, error) {
    return newConnection(func() (*dbus.Conn, error) {
        return dbusAuthHelloConnection(dbus.SessionBusPrivate)
    })
}

// NewSystemdConnection establishes a private, direct connection to systemd.
// This can be used for communicating with systemd without a dbus daemon.
// Callers should call Close() when done with the connection.
func NewSystemdConnection() (*Conn, error) {
    return newConnection(func() (*dbus.Conn, error) {
        // We skip Hello when talking directly to systemd.
        return dbusAuthConnection(func() (*dbus.Conn, error) {
            return dbus.Dial("unix:path=/run/systemd/private")
        })
    })
}

// Close closes an established connection
func (c *Conn) Close() {
    c.sysconn.Close()
    c.sigconn.Close()
}

func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
    sysconn, err := createBus()
    if err != nil {
        return nil, err
    }

    sigconn, err := createBus()
    if err != nil {
        sysconn.Close()
        return nil, err
    }

    c := &Conn{
        sysconn: sysconn,
        sysobj:  systemdObject(sysconn),
        sigconn: sigconn,
        sigobj:  systemdObject(sigconn),
    }

    c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
    c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)

    // Setup the listeners on jobs so that we can get completions
    c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
        "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")

    c.dispatch()
    return c, nil
}

// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
// interface. The value is returned in its string representation, as defined at
// https://developer.gnome.org/glib/unstable/gvariant-text.html
func (c *Conn) GetManagerProperty(prop string) (string, error) {
    variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
    if err != nil {
        return "", err
    }
    return variant.String(), nil
}

func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
    conn, err := createBus()
    if err != nil {
        return nil, err
    }

    // Only use EXTERNAL method, and hardcode the uid (not username)
    // to avoid a username lookup (which requires a dynamically linked
    // libc)
    methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}

    err = conn.Auth(methods)
    if err != nil {
        conn.Close()
        return nil, err
    }

    return conn, nil
}

func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
    conn, err := dbusAuthConnection(createBus)
    if err != nil {
        return nil, err
    }

    if err = conn.Hello(); err != nil {
        conn.Close()
        return nil, err
    }

    return conn, nil
}

func systemdObject(conn *dbus.Conn) dbus.BusObject {
    return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
}
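The vendored dbus.go above defines the connection plumbing (New, NewUserConnection, NewSystemdConnection) and GetManagerProperty. For orientation only, here is a minimal, hypothetical caller — not part of this commit — that assumes the vendored import path github.com/coreos/go-systemd/dbus:

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    // New connects to the system bus and authenticates with the EXTERNAL
    // mechanism set up in dbusAuthConnection above.
    conn, err := dbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // GetManagerProperty returns the value in its string representation.
    version, err := conn.GetManagerProperty("Version")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("systemd version:", version)
}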
442
vendor/github.com/coreos/go-systemd/dbus/methods.go
generated
vendored
Normal file
@@ -0,0 +1,442 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

import (
    "errors"
    "path"
    "strconv"

    "github.com/godbus/dbus"
)

func (c *Conn) jobComplete(signal *dbus.Signal) {
    var id uint32
    var job dbus.ObjectPath
    var unit string
    var result string
    dbus.Store(signal.Body, &id, &job, &unit, &result)
    c.jobListener.Lock()
    out, ok := c.jobListener.jobs[job]
    if ok {
        out <- result
        delete(c.jobListener.jobs, job)
    }
    c.jobListener.Unlock()
}

func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
    if ch != nil {
        c.jobListener.Lock()
        defer c.jobListener.Unlock()
    }

    var p dbus.ObjectPath
    err := c.sysobj.Call(job, 0, args...).Store(&p)
    if err != nil {
        return 0, err
    }

    if ch != nil {
        c.jobListener.jobs[p] = ch
    }

    // ignore error since 0 is fine if conversion fails
    jobID, _ := strconv.Atoi(path.Base(string(p)))

    return jobID, nil
}

// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
// specified by the mode string).
//
// Takes the unit to activate, plus a mode string. The mode needs to be one of
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
// "replace" the call will start the unit and its dependencies, possibly
// replacing already queued jobs that conflict with this. If "fail" the call
// will start the unit and its dependencies, but will fail if this would change
// an already queued job. If "isolate" the call will start the unit in question
// and terminate all units that aren't dependencies of it. If
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
// If "ignore-requirements" it will start a unit but only ignore the
// requirement dependencies. It is not recommended to make use of the latter
// two options.
//
// If the provided channel is non-nil, a result string will be sent to it upon
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
// done indicates successful execution of a job. canceled indicates that a job
// has been canceled before it finished execution. timeout indicates that the
// job timeout was reached. failed indicates that the job failed. dependency
// indicates that a job this job has been depending on failed and the job hence
// has been removed too. skipped indicates that a job was skipped because it
// didn't apply to the units current state.
//
// If no error occurs, the ID of the underlying systemd job will be returned. There
// does exist the possibility for no error to be returned, but for the returned job
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
// should not be considered authoritative.
//
// If an error does occur, it will be returned to the user alongside a job ID of 0.
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
}

// StopUnit is similar to StartUnit but stops the specified unit rather
// than starting it.
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
}

// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
}

// RestartUnit restarts a service. If a service is restarted that isn't
// running it will be started.
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
}

// TryRestartUnit is like RestartUnit, except that a service that isn't running
// is not affected by the restart.
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
}

// ReloadOrRestart attempts a reload if the unit supports it and use a restart
// otherwise.
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
}

// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try"
// flavored restart otherwise.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
}

// StartTransientUnit() may be used to create and start a transient unit, which
// will be released as soon as it is not running or referenced anymore or the
// system is rebooted. name is the unit name including suffix, and must be
// unique. mode is the same as in StartUnit(), properties contains properties
// of the unit.
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
    return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
}

// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
// processes are killed.
func (c *Conn) KillUnit(name string, signal int32) {
    c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
}

// ResetFailedUnit resets the "failed" state of a specific unit.
func (c *Conn) ResetFailedUnit(name string) error {
    return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
}

// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
    var err error
    var props map[string]dbus.Variant

    path := unitPath(unit)
    if !path.IsValid() {
        return nil, errors.New("invalid unit name: " + unit)
    }

    obj := c.sysconn.Object("org.freedesktop.systemd1", path)
    err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
    if err != nil {
        return nil, err
    }

    out := make(map[string]interface{}, len(props))
    for k, v := range props {
        out[k] = v.Value()
    }

    return out, nil
}

// GetUnitProperties takes the unit name and returns all of its dbus object properties.
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
    return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
}

func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
    var err error
    var prop dbus.Variant

    path := unitPath(unit)
    if !path.IsValid() {
        return nil, errors.New("invalid unit name: " + unit)
    }

    obj := c.sysconn.Object("org.freedesktop.systemd1", path)
    err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
    if err != nil {
        return nil, err
    }

    return &Property{Name: propertyName, Value: prop}, nil
}

func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
    return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
}

// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
    return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
}

// SetUnitProperties() may be used to modify certain unit properties at runtime.
// Not all properties may be changed at runtime, but many resource management
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
// instantly, and stored on disk for future boots, unless runtime is true, in which
// case the settings only apply until the next reboot. name is the name of the unit
// to modify. properties are the settings to set, encoded as an array of property
// name and value pairs.
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
    return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
}

func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
    return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
}

type UnitStatus struct {
    Name        string          // The primary unit name as string
    Description string          // The human readable description string
    LoadState   string          // The load state (i.e. whether the unit file has been loaded successfully)
    ActiveState string          // The active state (i.e. whether the unit is currently started or not)
    SubState    string          // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
    Followed    string          // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
    Path        dbus.ObjectPath // The unit object path
    JobId       uint32          // If there is a job queued for the job unit the numeric job id, 0 otherwise
    JobType     string          // The job type as string
    JobPath     dbus.ObjectPath // The job object path
}

// ListUnits returns an array with all currently loaded units. Note that
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
    result := make([][]interface{}, 0)
    err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
    if err != nil {
        return nil, err
    }

    resultInterface := make([]interface{}, len(result))
    for i := range result {
        resultInterface[i] = result[i]
    }

    status := make([]UnitStatus, len(result))
    statusInterface := make([]interface{}, len(status))
    for i := range status {
        statusInterface[i] = &status[i]
    }

    err = dbus.Store(resultInterface, statusInterface...)
    if err != nil {
        return nil, err
    }

    return status, nil
}

type UnitFile struct {
    Path string
    Type string
}

// ListUnitFiles returns an array of all available units on disk.
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
    result := make([][]interface{}, 0)
    err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result)
    if err != nil {
        return nil, err
    }

    resultInterface := make([]interface{}, len(result))
    for i := range result {
        resultInterface[i] = result[i]
    }

    files := make([]UnitFile, len(result))
    fileInterface := make([]interface{}, len(files))
    for i := range files {
        fileInterface[i] = &files[i]
    }

    err = dbus.Store(resultInterface, fileInterface...)
    if err != nil {
        return nil, err
    }

    return files, nil
}

type LinkUnitFileChange EnableUnitFileChange

// LinkUnitFiles() links unit files (that are located outside of the
// usual unit search paths) into the unit search path.
//
// It takes a list of absolute paths to unit files to link and two
// booleans. The first boolean controls whether the unit shall be
// enabled for runtime only (true, /run), or persistently (false,
// /etc).
// The second controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns a list of the changes made. The list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
    result := make([][]interface{}, 0)
    err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
    if err != nil {
        return nil, err
    }

    resultInterface := make([]interface{}, len(result))
    for i := range result {
        resultInterface[i] = result[i]
    }

    changes := make([]LinkUnitFileChange, len(result))
    changesInterface := make([]interface{}, len(changes))
    for i := range changes {
        changesInterface[i] = &changes[i]
    }

    err = dbus.Store(resultInterface, changesInterface...)
    if err != nil {
        return nil, err
    }

    return changes, nil
}

// EnableUnitFiles() may be used to enable one or more units in the system (by
// creating symlinks to them in /etc or /run).
//
// It takes a list of unit files to enable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and two booleans: the first controls whether the unit shall
// be enabled for runtime only (true, /run), or persistently (false, /etc).
// The second one controls whether symlinks pointing to other units shall
// be replaced if necessary.
//
// This call returns one boolean and an array with the changes made. The
// boolean signals whether the unit files contained any enablement
// information (i.e. an [Install]) section. The changes list consists of
// structures with three strings: the type of the change (one of symlink
// or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
    var carries_install_info bool

    result := make([][]interface{}, 0)
    err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
    if err != nil {
        return false, nil, err
    }

    resultInterface := make([]interface{}, len(result))
    for i := range result {
        resultInterface[i] = result[i]
    }

    changes := make([]EnableUnitFileChange, len(result))
    changesInterface := make([]interface{}, len(changes))
    for i := range changes {
        changesInterface[i] = &changes[i]
    }

    err = dbus.Store(resultInterface, changesInterface...)
    if err != nil {
        return false, nil, err
    }

    return carries_install_info, changes, nil
}

type EnableUnitFileChange struct {
    Type        string // Type of the change (one of symlink or unlink)
    Filename    string // File name of the symlink
    Destination string // Destination of the symlink
}

// DisableUnitFiles() may be used to disable one or more units in the system (by
// removing symlinks to them from /etc or /run).
//
// It takes a list of unit files to disable (either just file names or full
// absolute paths if the unit files are residing outside the usual unit
// search paths), and one boolean: whether the unit was enabled for runtime
// only (true, /run), or persistently (false, /etc).
//
// This call returns an array with the changes made. The changes list
// consists of structures with three strings: the type of the change (one of
// symlink or unlink), the file name of the symlink and the destination of the
// symlink.
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
    result := make([][]interface{}, 0)
    err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
    if err != nil {
        return nil, err
    }

    resultInterface := make([]interface{}, len(result))
    for i := range result {
        resultInterface[i] = result[i]
    }

    changes := make([]DisableUnitFileChange, len(result))
    changesInterface := make([]interface{}, len(changes))
    for i := range changes {
        changesInterface[i] = &changes[i]
    }

    err = dbus.Store(resultInterface, changesInterface...)
    if err != nil {
        return nil, err
    }

    return changes, nil
}

type DisableUnitFileChange struct {
    Type        string // Type of the change (one of symlink or unlink)
    Filename    string // File name of the symlink
    Destination string // Destination of the symlink
}

// Reload instructs systemd to scan for and reload unit files. This is
// equivalent to a 'systemctl daemon-reload'.
func (c *Conn) Reload() error {
    return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
}

func unitPath(name string) dbus.ObjectPath {
    return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
}
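methods.go above routes the unit lifecycle calls (StartUnit, StopUnit, and friends) through startJob, and jobComplete delivers the job result on the optional channel. A small hedged sketch of that flow follows; the caller and the unit name nginx.service are illustrative assumptions, not part of this commit:

package main

import (
    "log"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Buffered so the library's jobComplete send never blocks its dispatch loop.
    done := make(chan string, 1)
    if _, err := conn.StartUnit("nginx.service", "replace", done); err != nil {
        log.Fatal(err)
    }
    // Result is one of: done, canceled, timeout, failed, dependency, skipped.
    log.Println("start job finished:", <-done)
}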
218
vendor/github.com/coreos/go-systemd/dbus/properties.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

import (
    "github.com/godbus/dbus"
)

// From the systemd docs:
//
// The properties array of StartTransientUnit() may take many of the settings
// that may also be configured in unit files. Not all parameters are currently
// accepted though, but we plan to cover more properties with future release.
// Currently you may set the Description, Slice and all dependency types of
// units, as well as RemainAfterExit, ExecStart for service units,
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
// directly to their counterparts in unit files and as normal D-Bus object
// properties. The exception here is the PIDs field of scope units which is
// used for construction of the scope only and specifies the initial PIDs to
// add to the scope object.

type Property struct {
    Name  string
    Value dbus.Variant
}

type PropertyCollection struct {
    Name       string
    Properties []Property
}

type execStart struct {
    Path             string   // the binary path to execute
    Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
    UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
}

// PropExecStart sets the ExecStart service property. The first argument is a
// slice with the binary path to execute followed by the arguments to pass to
// the executed command. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
func PropExecStart(command []string, uncleanIsFailure bool) Property {
    execStarts := []execStart{
        execStart{
            Path:             command[0],
            Args:             command,
            UncleanIsFailure: uncleanIsFailure,
        },
    }

    return Property{
        Name:  "ExecStart",
        Value: dbus.MakeVariant(execStarts),
    }
}

// PropRemainAfterExit sets the RemainAfterExit service property. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
func PropRemainAfterExit(b bool) Property {
    return Property{
        Name:  "RemainAfterExit",
        Value: dbus.MakeVariant(b),
    }
}

// PropDescription sets the Description unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
func PropDescription(desc string) Property {
    return Property{
        Name:  "Description",
        Value: dbus.MakeVariant(desc),
    }
}

func propDependency(name string, units []string) Property {
    return Property{
        Name:  name,
        Value: dbus.MakeVariant(units),
    }
}

// PropRequires sets the Requires unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
func PropRequires(units ...string) Property {
    return propDependency("Requires", units)
}

// PropRequiresOverridable sets the RequiresOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
func PropRequiresOverridable(units ...string) Property {
    return propDependency("RequiresOverridable", units)
}

// PropRequisite sets the Requisite unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
func PropRequisite(units ...string) Property {
    return propDependency("Requisite", units)
}

// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
func PropRequisiteOverridable(units ...string) Property {
    return propDependency("RequisiteOverridable", units)
}

// PropWants sets the Wants unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
func PropWants(units ...string) Property {
    return propDependency("Wants", units)
}

// PropBindsTo sets the BindsTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
func PropBindsTo(units ...string) Property {
    return propDependency("BindsTo", units)
}

// PropRequiredBy sets the RequiredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
func PropRequiredBy(units ...string) Property {
    return propDependency("RequiredBy", units)
}

// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
func PropRequiredByOverridable(units ...string) Property {
    return propDependency("RequiredByOverridable", units)
}

// PropWantedBy sets the WantedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
func PropWantedBy(units ...string) Property {
    return propDependency("WantedBy", units)
}

// PropBoundBy sets the BoundBy unit property. See
// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy=
func PropBoundBy(units ...string) Property {
    return propDependency("BoundBy", units)
}

// PropConflicts sets the Conflicts unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
func PropConflicts(units ...string) Property {
    return propDependency("Conflicts", units)
}

// PropConflictedBy sets the ConflictedBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
func PropConflictedBy(units ...string) Property {
    return propDependency("ConflictedBy", units)
}

// PropBefore sets the Before unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
func PropBefore(units ...string) Property {
    return propDependency("Before", units)
}

// PropAfter sets the After unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
func PropAfter(units ...string) Property {
    return propDependency("After", units)
}

// PropOnFailure sets the OnFailure unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
func PropOnFailure(units ...string) Property {
    return propDependency("OnFailure", units)
}

// PropTriggers sets the Triggers unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
func PropTriggers(units ...string) Property {
    return propDependency("Triggers", units)
}

// PropTriggeredBy sets the TriggeredBy unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
func PropTriggeredBy(units ...string) Property {
    return propDependency("TriggeredBy", units)
}

// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
func PropPropagatesReloadTo(units ...string) Property {
    return propDependency("PropagatesReloadTo", units)
}

// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
func PropRequiresMountsFor(units ...string) Property {
    return propDependency("RequiresMountsFor", units)
}

// PropSlice sets the Slice unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
func PropSlice(slice string) Property {
    return Property{
        Name:  "Slice",
        Value: dbus.MakeVariant(slice),
    }
}
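properties.go above builds the Property values that StartTransientUnit (from methods.go) expects. A hypothetical sketch of combining them follows; the unit name and the sleep command are assumptions for illustration only:

package main

import (
    "log"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Each helper returns a name/variant pair understood by systemd.
    props := []dbus.Property{
        dbus.PropDescription("one-shot sleep spawned over D-Bus"),
        dbus.PropExecStart([]string{"/bin/sleep", "30"}, false),
    }

    // Transient units vanish once they stop; mode semantics match StartUnit.
    if _, err := conn.StartTransientUnit("sleep-example.service", "replace", props, nil); err != nil {
        log.Fatal(err)
    }
}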
47
vendor/github.com/coreos/go-systemd/dbus/set.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

type set struct {
    data map[string]bool
}

func (s *set) Add(value string) {
    s.data[value] = true
}

func (s *set) Remove(value string) {
    delete(s.data, value)
}

func (s *set) Contains(value string) (exists bool) {
    _, exists = s.data[value]
    return
}

func (s *set) Length() int {
    return len(s.data)
}

func (s *set) Values() (values []string) {
    for val, _ := range s.data {
        values = append(values, val)
    }
    return
}

func newSet() *set {
    return &set{make(map[string]bool)}
}
250
vendor/github.com/coreos/go-systemd/dbus/subscription.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

import (
    "errors"
    "time"

    "github.com/godbus/dbus"
)

const (
    cleanIgnoreInterval = int64(10 * time.Second)
    ignoreInterval      = int64(30 * time.Millisecond)
)

// Subscribe sets up this connection to subscribe to all systemd dbus events.
// This is required before calling SubscribeUnits. When the connection closes
// systemd will automatically stop sending signals so there is no need to
// explicitly call Unsubscribe().
func (c *Conn) Subscribe() error {
    c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
        "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
    c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
        "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")

    err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
    if err != nil {
        return err
    }

    return nil
}

// Unsubscribe this connection from systemd dbus events.
func (c *Conn) Unsubscribe() error {
    err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
    if err != nil {
        return err
    }

    return nil
}

func (c *Conn) dispatch() {
    ch := make(chan *dbus.Signal, signalBuffer)

    c.sigconn.Signal(ch)

    go func() {
        for {
            signal, ok := <-ch
            if !ok {
                return
            }

            if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
                c.jobComplete(signal)
            }

            if c.subscriber.updateCh == nil {
                continue
            }

            var unitPath dbus.ObjectPath
            switch signal.Name {
            case "org.freedesktop.systemd1.Manager.JobRemoved":
                unitName := signal.Body[2].(string)
                c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
            case "org.freedesktop.systemd1.Manager.UnitNew":
                unitPath = signal.Body[1].(dbus.ObjectPath)
            case "org.freedesktop.DBus.Properties.PropertiesChanged":
                if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
                    unitPath = signal.Path
                }
            }

            if unitPath == dbus.ObjectPath("") {
                continue
            }

            c.sendSubStateUpdate(unitPath)
        }
    }()
}

// Returns two unbuffered channels which will receive all changed units every
// interval. Deleted units are sent as nil.
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
    return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
}

// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
// size of the channels, the comparison function for detecting changes and a filter
// function for cutting down on the noise that your channel receives.
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
    old := make(map[string]*UnitStatus)
    statusChan := make(chan map[string]*UnitStatus, buffer)
    errChan := make(chan error, buffer)

    go func() {
        for {
            timerChan := time.After(interval)

            units, err := c.ListUnits()
            if err == nil {
                cur := make(map[string]*UnitStatus)
                for i := range units {
                    if filterUnit != nil && filterUnit(units[i].Name) {
                        continue
                    }
                    cur[units[i].Name] = &units[i]
                }

                // add all new or changed units
                changed := make(map[string]*UnitStatus)
                for n, u := range cur {
                    if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
                        changed[n] = u
                    }
                    delete(old, n)
                }

                // add all deleted units
                for oldN := range old {
                    changed[oldN] = nil
                }

                old = cur

                if len(changed) != 0 {
                    statusChan <- changed
                }
            } else {
                errChan <- err
            }

            <-timerChan
        }
    }()

    return statusChan, errChan
}

type SubStateUpdate struct {
    UnitName string
    SubState string
}

// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
// Although this writes to updateCh on every state change, the reported state
// may be more recent than the change that generated it (due to an unavoidable
// race in the systemd dbus interface). That is, this method provides a good
// way to keep a current view of all units' states, but is not guaranteed to
// show every state transition they go through. Furthermore, state changes
// will only be written to the channel with non-blocking writes. If updateCh
// is full, it attempts to write an error to errCh; if errCh is full, the error
// passes silently.
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
    c.subscriber.Lock()
    defer c.subscriber.Unlock()
    c.subscriber.updateCh = updateCh
    c.subscriber.errCh = errCh
}

func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
    c.subscriber.Lock()
    defer c.subscriber.Unlock()

    if c.shouldIgnore(path) {
        return
    }

    info, err := c.GetUnitProperties(string(path))
    if err != nil {
        select {
        case c.subscriber.errCh <- err:
        default:
        }
    }

    name := info["Id"].(string)
    substate := info["SubState"].(string)

    update := &SubStateUpdate{name, substate}
    select {
    case c.subscriber.updateCh <- update:
    default:
        select {
        case c.subscriber.errCh <- errors.New("update channel full!"):
        default:
        }
    }

    c.updateIgnore(path, info)
}

// The ignore functions work around a wart in the systemd dbus interface.
// Requesting the properties of an unloaded unit will cause systemd to send a
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
// properties on UnitNew (as that's the only indication of a new unit coming up
// for the first time), we would enter an infinite loop if we did not attempt
// to detect and ignore these spurious signals. The signal themselves are
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
// unloaded unit's signals for a short time after requesting its properties.
// This means that we will miss e.g. a transient unit being restarted
// *immediately* upon failure and also a transient unit being started
// immediately after requesting its status (with systemctl status, for example,
// because this causes a UnitNew signal to be sent which then causes us to fetch
// the properties).

func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
    t, ok := c.subscriber.ignore[path]
    return ok && t >= time.Now().UnixNano()
}

func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
    c.cleanIgnore()

    // unit is unloaded - it will trigger bad systemd dbus behavior
    if info["LoadState"].(string) == "not-found" {
        c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
    }
}

// without this, ignore would grow unboundedly over time
func (c *Conn) cleanIgnore() {
    now := time.Now().UnixNano()
    if c.subscriber.cleanIgnore < now {
        c.subscriber.cleanIgnore = now + cleanIgnoreInterval

        for p, t := range c.subscriber.ignore {
            if t < now {
                delete(c.subscriber.ignore, p)
            }
        }
    }
}
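subscription.go above implements both the signal dispatch loop and the polling SubscribeUnits API, and its own doc comments state that Subscribe must be called first. A hedged usage sketch, illustrative only (the five-second interval is an assumption):

package main

import (
    "log"
    "time"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Required before SubscribeUnits, per the doc comment above.
    if err := conn.Subscribe(); err != nil {
        log.Fatal(err)
    }

    updates, errs := conn.SubscribeUnits(5 * time.Second)
    for {
        select {
        case changed := <-updates:
            for name, status := range changed {
                if status == nil {
                    log.Println(name, "was removed")
                    continue
                }
                log.Println(name, "is now", status.ActiveState, "/", status.SubState)
            }
        case err := <-errs:
            log.Println("subscription error:", err)
        }
    }
}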
57
vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package dbus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SubscriptionSet returns a subscription set which is like conn.Subscribe but
|
||||||
|
// can filter to only return events for a set of units.
|
||||||
|
type SubscriptionSet struct {
|
||||||
|
*set
|
||||||
|
conn *Conn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SubscriptionSet) filter(unit string) bool {
|
||||||
|
return !s.Contains(unit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe starts listening for dbus events for all of the units in the set.
|
||||||
|
// Returns channels identical to conn.SubscribeUnits.
|
||||||
|
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
|
||||||
|
// TODO: Make fully evented by using systemd 209 with properties changed values
|
||||||
|
return s.conn.SubscribeUnitsCustom(time.Second, 0,
|
||||||
|
mismatchUnitStatus,
|
||||||
|
func(unit string) bool { return s.filter(unit) },
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubscriptionSet returns a new subscription set.
|
||||||
|
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
|
||||||
|
return &SubscriptionSet{newSet(), conn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mismatchUnitStatus returns true if the provided UnitStatus objects
|
||||||
|
// are not equivalent. false is returned if the objects are equivalent.
|
||||||
|
// Only the Name, Description and state-related fields are used in
|
||||||
|
// the comparison.
|
||||||
|
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
|
||||||
|
return u1.Name != u2.Name ||
|
||||||
|
u1.Description != u2.Description ||
|
||||||
|
u1.LoadState != u2.LoadState ||
|
||||||
|
u1.ActiveState != u2.ActiveState ||
|
||||||
|
u1.SubState != u2.SubState
|
||||||
|
}
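A hedged usage sketch for the type above: watching a single unit via a SubscriptionSet. The Add method is assumed to be promoted from the embedded set helper (not shown in this diff), and a nil *UnitStatus entry is assumed to mean the unit disappeared.

package main

import (
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	subset := conn.NewSubscriptionSet()
	subset.Add("nginx.service") // assumed: Add is promoted from the embedded set

	statusCh, errCh := subset.Subscribe()
	for {
		select {
		case statuses := <-statusCh:
			for name, st := range statuses {
				if st == nil {
					log.Printf("%s: removed", name) // assumption: nil signals removal
					continue
				}
				log.Printf("%s: %s/%s", name, st.ActiveState, st.SubState)
			}
		case err := <-errCh:
			log.Println("watch error:", err)
		}
	}
}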
191 vendor/github.com/coreos/go-systemd/util/LICENSE (generated, vendored, new file)
@@ -0,0 +1,191 @@
(Standard Apache License, Version 2.0 text.)
270 vendor/github.com/coreos/go-systemd/util/util.go (generated, vendored, new file)
@@ -0,0 +1,270 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package util contains utility functions related to systemd that applications
// can use to check things like whether systemd is running. Note that some of
// these functions attempt to manually load systemd libraries at runtime rather
// than linking against them.
package util

// #cgo LDFLAGS: -ldl
// #include <stdlib.h>
// #include <dlfcn.h>
// #include <sys/types.h>
// #include <unistd.h>
//
// int
// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
// {
//   int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
//
//   sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
//   return sd_pid_get_owner_uid(pid, uid);
// }
//
// int
// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
// {
//   int (*sd_pid_get_unit)(pid_t, char **);
//
//   sd_pid_get_unit = (int (*)(pid_t, char **))f;
//   return sd_pid_get_unit(pid, unit);
// }
//
// int
// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
// {
//   int (*sd_pid_get_slice)(pid_t, char **);
//
//   sd_pid_get_slice = (int (*)(pid_t, char **))f;
//   return sd_pid_get_slice(pid, slice);
// }
//
// int
// am_session_leader()
// {
//   return (getsid(0) == getpid());
// }
import "C"
import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"syscall"
	"unsafe"
)

var ErrSoNotFound = errors.New("unable to open a handle to libsystemd")

// libHandle represents an open handle to the systemd C library
type libHandle struct {
	handle  unsafe.Pointer
	libname string
}

func (h *libHandle) Close() error {
	if r := C.dlclose(h.handle); r != 0 {
		return fmt.Errorf("error closing %v: %d", h.libname, r)
	}
	return nil
}

// getHandle tries to get a handle to a systemd library (.so), attempting to
// access it by several different names and returning the first that is
// successfully opened. Callers are responsible for closing the handler.
// If no library can be successfully opened, an error is returned.
func getHandle() (*libHandle, error) {
	for _, name := range []string{
		// systemd < 209
		"libsystemd-login.so",
		"libsystemd-login.so.0",

		// systemd >= 209 merged libsystemd-login into libsystemd proper
		"libsystemd.so",
		"libsystemd.so.0",
	} {
		libname := C.CString(name)
		defer C.free(unsafe.Pointer(libname))
		handle := C.dlopen(libname, C.RTLD_LAZY)
		if handle != nil {
			h := &libHandle{
				handle:  handle,
				libname: name,
			}
			return h, nil
		}
	}
	return nil, ErrSoNotFound
}

// GetRunningSlice attempts to retrieve the name of the systemd slice in which
// the current process is running.
// This function is a wrapper around the libsystemd C library; if it cannot be
// opened, an error is returned.
func GetRunningSlice() (slice string, err error) {
	var h *libHandle
	h, err = getHandle()
	if err != nil {
		return
	}
	defer func() {
		if err1 := h.Close(); err1 != nil {
			err = err1
		}
	}()

	sym := C.CString("sd_pid_get_slice")
	defer C.free(unsafe.Pointer(sym))
	sd_pid_get_slice := C.dlsym(h.handle, sym)
	if sd_pid_get_slice == nil {
		err = fmt.Errorf("error resolving sd_pid_get_slice function")
		return
	}

	var s string
	sl := C.CString(s)
	defer C.free(unsafe.Pointer(sl))

	ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
	if ret < 0 {
		err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
		return
	}

	return C.GoString(sl), nil
}

// RunningFromSystemService tries to detect whether the current process has
// been invoked from a system service. The condition for this is whether the
// process is _not_ a user process. User processes are those running in session
// scopes or under per-user `systemd --user` instances.
//
// To avoid false positives on systems without `pam_systemd` (which is
// responsible for creating user sessions), this function also uses a heuristic
// to detect whether it's being invoked from a session leader process. This is
// the case if the current process is executed directly from a service file
// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
// command is instead launched in a subshell or similar so that it is not
// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`)
//
// This function is a wrapper around the libsystemd C library; if this is
// unable to successfully open a handle to the library for any reason (e.g. it
// cannot be found), an error will be returned.
func RunningFromSystemService() (ret bool, err error) {
	var h *libHandle
	h, err = getHandle()
	if err != nil {
		return
	}
	defer func() {
		if err1 := h.Close(); err1 != nil {
			err = err1
		}
	}()

	sym := C.CString("sd_pid_get_owner_uid")
	defer C.free(unsafe.Pointer(sym))
	sd_pid_get_owner_uid := C.dlsym(h.handle, sym)
	if sd_pid_get_owner_uid == nil {
		err = fmt.Errorf("error resolving sd_pid_get_owner_uid function")
		return
	}

	var uid C.uid_t
	errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
	serrno := syscall.Errno(-errno)
	// when we're running from a unit file, sd_pid_get_owner_uid returns
	// ENOENT (systemd <220) or ENXIO (systemd >=220)
	switch {
	case errno >= 0:
		ret = false
	case serrno == syscall.ENOENT, serrno == syscall.ENXIO:
		// Since the implementation of sessions in systemd relies on
		// the `pam_systemd` module, using the sd_pid_get_owner_uid
		// heuristic alone can result in false positives if that module
		// (or PAM itself) is not present or properly configured on the
		// system. As such, we also check if we're the session leader,
		// which should be the case if we're invoked from a unit file,
		// but not if e.g. we're invoked from the command line from a
		// user's login session
		ret = C.am_session_leader() == 1
	default:
		err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
	}
	return
}

// CurrentUnitName attempts to retrieve the name of the systemd system unit
// from which the calling process has been invoked. It wraps the systemd
// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
// systemd system unit, this function will return an error.
func CurrentUnitName() (unit string, err error) {
	var h *libHandle
	h, err = getHandle()
	if err != nil {
		return
	}
	defer func() {
		if err1 := h.Close(); err1 != nil {
			err = err1
		}
	}()

	sym := C.CString("sd_pid_get_unit")
	defer C.free(unsafe.Pointer(sym))
	sd_pid_get_unit := C.dlsym(h.handle, sym)
	if sd_pid_get_unit == nil {
		err = fmt.Errorf("error resolving sd_pid_get_unit function")
		return
	}

	var s string
	u := C.CString(s)
	defer C.free(unsafe.Pointer(u))

	ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
	if ret < 0 {
		err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
		return
	}

	unit = C.GoString(u)
	return
}

// IsRunningSystemd checks whether the host was booted with systemd as its init
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
// checks whether /run/systemd/system/ exists and is a directory.
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
func IsRunningSystemd() bool {
	fi, err := os.Lstat("/run/systemd/system")
	if err != nil {
		return false
	}
	return fi.IsDir()
}

// GetMachineID returns a host's 128-bit machine ID as a string. This functions
// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
// the contents of /etc/machine-id
// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
func GetMachineID() (string, error) {
	machineID, err := ioutil.ReadFile("/etc/machine-id")
	if err != nil {
		return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
	}
	return strings.TrimSpace(string(machineID)), nil
}
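A short, illustrative sketch of how the helpers in this vendored package can be combined; the control flow shown is an assumption for illustration, not something binctr does in this commit.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/util"
)

func main() {
	// IsRunningSystemd only stats /run/systemd/system, so it never needs libsystemd.
	if !util.IsRunningSystemd() {
		log.Fatal("this host was not booted with systemd")
	}

	id, err := util.GetMachineID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("machine-id:", id)

	// The remaining helpers dlopen() libsystemd at runtime and can fail with
	// ErrSoNotFound on hosts where the shared library is not installed.
	if unit, err := util.CurrentUnitName(); err == nil {
		fmt.Println("invoked from unit:", unit)
	}
}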
191 vendor/github.com/docker/docker/pkg/archive/LICENSE (generated, vendored, new file)
@@ -0,0 +1,191 @@
(Standard Apache License, Version 2.0 text. Copyright 2013-2016 Docker, Inc.)
1096 vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored, new file)
File diff suppressed because it is too large
112 vendor/github.com/docker/docker/pkg/archive/archive_unix.go (generated, vendored, new file)
@@ -0,0 +1,112 @@
// +build !windows

package archive

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		err = errors.New("cannot convert stat value to syscall.Stat_t")
		return
	}

	inode = uint64(s.Ino)

	// Currently go does not fill in the major/minors
	if s.Mode&syscall.S_IFBLK != 0 ||
		s.Mode&syscall.S_IFCHR != 0 {
		hdr.Devmajor = int64(major(uint64(s.Rdev)))
		hdr.Devminor = int64(minor(uint64(s.Rdev)))
	}

	return
}

func getFileUIDGID(stat interface{}) (int, int, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return int(s.Uid), int(s.Gid), nil
}

func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= syscall.S_IFBLK
	case tar.TypeChar:
		mode |= syscall.S_IFCHR
	case tar.TypeFifo:
		mode |= syscall.S_IFIFO
	}

	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
		return err
	}
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
70 vendor/github.com/docker/docker/pkg/archive/archive_windows.go (generated, vendored, new file)
@@ -0,0 +1,70 @@
// +build windows

package archive

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0755
	// Add the x bit: make everything +x from windows
	perm |= 0111

	return perm
}

func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
	return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}

func getFileUIDGID(stat interface{}) (int, int, error) {
	// no notion of file ownership mapping yet on Windows
	return 0, 0, nil
}
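For context, a hedged sketch of how the platform-specific helpers above are typically exercised through this package's public entry points. TarWithOptions, Untar, TarOptions, and archive.Uncompressed live in archive.go, whose diff is suppressed above, so the exact names and signatures used here are assumptions.

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Pack a rootfs directory into an uncompressed tar stream.
	rd, err := archive.TarWithOptions("./rootfs", &archive.TarOptions{
		Compression: archive.Uncompressed, // assumed field/constant names
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rd.Close()

	// Unpack it elsewhere; the per-OS behavior comes from helpers like
	// chmodTarEntry and handleTarTypeBlockCharFifo shown in this diff.
	if err := archive.Untar(rd, "./rootfs-copy", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}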
416 vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored, new file)
@@ -0,0 +1,416 @@
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/docker/pkg/idtools"
|
||||||
|
"github.com/docker/docker/pkg/pools"
|
||||||
|
"github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChangeType represents the change type.
|
||||||
|
type ChangeType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ChangeModify represents the modify operation.
|
||||||
|
ChangeModify = iota
|
||||||
|
// ChangeAdd represents the add operation.
|
||||||
|
ChangeAdd
|
||||||
|
// ChangeDelete represents the delete operation.
|
||||||
|
ChangeDelete
|
||||||
|
)
|
||||||
|
|
||||||
|
func (c ChangeType) String() string {
|
||||||
|
switch c {
|
||||||
|
case ChangeModify:
|
||||||
|
return "C"
|
||||||
|
case ChangeAdd:
|
||||||
|
return "A"
|
||||||
|
case ChangeDelete:
|
||||||
|
return "D"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Change represents a change, it wraps the change type and path.
|
||||||
|
// It describes changes of the files in the path respect to the
|
||||||
|
// parent layers. The change could be modify, add, delete.
|
||||||
|
// This is used for layer diff.
|
||||||
|
type Change struct {
|
||||||
|
Path string
|
||||||
|
Kind ChangeType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (change *Change) String() string {
|
||||||
|
return fmt.Sprintf("%s %s", change.Kind, change.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// for sort.Sort
|
||||||
|
type changesByPath []Change
|
||||||
|
|
||||||
|
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
|
||||||
|
func (c changesByPath) Len() int { return len(c) }
|
||||||
|
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
||||||
|
|
||||||
|
// Gnu tar and the go tar writer don't have sub-second mtime
|
||||||
|
// precision, which is problematic when we apply changes via tar
|
||||||
|
// files, we handle this by comparing for exact times, *or* same
|
||||||
|
// second count and either a or b having exactly 0 nanoseconds
|
||||||
|
func sameFsTime(a, b time.Time) bool {
|
||||||
|
return a == b ||
|
||||||
|
(a.Unix() == b.Unix() &&
|
||||||
|
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||||
|
return a.Sec == b.Sec &&
|
||||||
|
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Changes walks the path rw and determines changes for the files in the path,
|
||||||
|
// with respect to the parent layers
|
||||||
|
func Changes(layers []string, rw string) ([]Change, error) {
|
||||||
|
var (
|
||||||
|
changes []Change
|
||||||
|
changedDirs = make(map[string]struct{})
|
||||||
|
)
|
||||||
|
|
||||||
|
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rebase path
|
||||||
|
path, err = filepath.Rel(rw, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
path = filepath.Join(string(os.PathSeparator), path)
|
||||||
|
|
||||||
|
// Skip root
|
||||||
|
if path == string(os.PathSeparator) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip AUFS metadata
|
||||||
|
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
change := Change{
|
||||||
|
Path: path,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find out what kind of modification happened
|
||||||
|
file := filepath.Base(path)
|
||||||
|
// If there is a whiteout, then the file was removed
|
||||||
|
if strings.HasPrefix(file, WhiteoutPrefix) {
|
||||||
|
originalFile := file[len(WhiteoutPrefix):]
|
||||||
|
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
||||||
|
change.Kind = ChangeDelete
|
||||||
|
} else {
|
||||||
|
// Otherwise, the file was added
|
||||||
|
change.Kind = ChangeAdd
|
||||||
|
|
||||||
|
// ...Unless it already existed in a top layer, in which case, it's a modification
|
||||||
|
for _, layer := range layers {
|
||||||
|
stat, err := os.Stat(filepath.Join(layer, path))
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// The file existed in the top layer, so that's a modification
|
||||||
|
|
||||||
|
// However, if it's a directory, maybe it wasn't actually modified.
|
||||||
|
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||||
|
if stat.IsDir() && f.IsDir() {
|
||||||
|
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||||
|
// Both directories are the same, don't record the change
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
change.Kind = ChangeModify
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||||
|
// This block is here to ensure the change is recorded even if the
|
||||||
|
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||||
|
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||||
|
if f.IsDir() {
|
||||||
|
changedDirs[path] = struct{}{}
|
||||||
|
}
|
||||||
|
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
|
||||||
|
parent := filepath.Dir(path)
|
||||||
|
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||||
|
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
|
||||||
|
changedDirs[parent] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record change
|
||||||
|
changes = append(changes, change)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return changes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo describes the information of a file.
|
||||||
|
type FileInfo struct {
|
||||||
|
parent *FileInfo
|
||||||
|
name string
|
||||||
|
stat *system.StatT
|
||||||
|
children map[string]*FileInfo
|
||||||
|
capability []byte
|
||||||
|
added bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookUp looks up the file information of a file.
|
||||||
|
func (info *FileInfo) LookUp(path string) *FileInfo {
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
parent := info
|
||||||
|
if path == string(os.PathSeparator) {
|
||||||
|
return info
|
||||||
|
}
|
||||||
|
|
||||||
|
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||||
|
for _, elem := range pathElements {
|
||||||
|
if elem != "" {
|
||||||
|
child := parent.children[elem]
|
||||||
|
if child == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
parent = child
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) path() string {
|
||||||
|
if info.parent == nil {
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
return string(os.PathSeparator)
|
||||||
|
}
|
||||||
|
return filepath.Join(info.parent.path(), info.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild, _ := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}
}

// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}

func newRootFileInfo() *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	root := &FileInfo{
		name:     string(os.PathSeparator),
		children: make(map[string]*FileInfo),
	}
	return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
	var (
		size int64
		sf   = make(map[uint64]struct{})
	)
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, err := os.Lstat(file)
			if err != nil {
				logrus.Errorf("Can not stat %q: %s", file, err)
				continue
			}

			if fileInfo != nil && !fileInfo.IsDir() {
				if hasHardlinks(fileInfo) {
					inode := getIno(fileInfo)
					if _, ok := sf[inode]; !ok {
						size += fileInfo.Size()
						sf[inode] = struct{}{}
					}
				} else {
					size += fileInfo.Size()
				}
			}
		}
	}
	return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(writer),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   uidMaps,
			GIDMaps:   gidMaps,
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
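The exported helpers above (ChangesDirs, ChangesSize, ExportChanges) compose into the usual diff-and-export flow. A minimal sketch of that flow, assuming the vendored import path; the directory paths are placeholders and the nil UID/GID maps simply mean "no remapping":

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compare a modified rootfs against its pristine copy.
	changes, err := archive.ChangesDirs("/tmp/rootfs-new", "/tmp/rootfs-old")
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "%d changes, %d bytes\n",
		len(changes), archive.ChangesSize("/tmp/rootfs-new", changes))

	// Stream the changes as a layer tar; deletions become whiteout entries.
	layer, err := archive.ExportChanges("/tmp/rootfs-new", changes, nil, nil)
	if err != nil {
		panic(err)
	}
	defer layer.Close()
	io.Copy(os.Stdout, layer)
}
```

Because ExportChanges is backed by io.Pipe, the tar stream is produced lazily in a goroutine as the returned Archive is read.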
285 vendor/github.com/docker/docker/pkg/archive/changes_linux.go generated vendored Normal file
@ -0,0 +1,285 @@
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||||
|
// method in general returns the entire contents of two directory trees, we
|
||||||
|
// optimize some FS calls out on linux. In particular, we take advantage of the
|
||||||
|
// fact that getdents(2) returns the inode of each file in the directory being
|
||||||
|
// walked, which, when walking two trees in parallel to generate a list of
|
||||||
|
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
||||||
|
// directly. Eliminating stat calls in this way can save up to seconds on large
|
||||||
|
// images.
|
||||||
|
type walker struct {
|
||||||
|
dir1 string
|
||||||
|
dir2 string
|
||||||
|
root1 *FileInfo
|
||||||
|
root2 *FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectFileInfoForChanges returns a complete representation of the trees
|
||||||
|
// rooted at dir1 and dir2, with one important exception: any subtree or
|
||||||
|
// leaf where the inode and device numbers are an exact match between dir1
|
||||||
|
// and dir2 will be pruned from the results. This method is *only* to be used
|
||||||
|
// to generating a list of changes between the two directories, as it does not
|
||||||
|
// reflect the full contents.
|
||||||
|
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
||||||
|
w := &walker{
|
||||||
|
dir1: dir1,
|
||||||
|
dir2: dir2,
|
||||||
|
root1: newRootFileInfo(),
|
||||||
|
root2: newRootFileInfo(),
|
||||||
|
}
|
||||||
|
|
||||||
|
i1, err := os.Lstat(w.dir1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
i2, err := os.Lstat(w.dir2)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.walk("/", i1, i2); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.root1, w.root2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given a FileInfo, its path info, and a reference to the root of the tree
|
||||||
|
// being constructed, register this file with the tree.
|
||||||
|
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||||
|
if fi == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
parent := root.LookUp(filepath.Dir(path))
|
||||||
|
if parent == nil {
|
||||||
|
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
|
||||||
|
}
|
||||||
|
info := &FileInfo{
|
||||||
|
name: filepath.Base(path),
|
||||||
|
children: make(map[string]*FileInfo),
|
||||||
|
parent: parent,
|
||||||
|
}
|
||||||
|
cpath := filepath.Join(dir, path)
|
||||||
|
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
info.stat = stat
|
||||||
|
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
||||||
|
parent.children[info.name] = info
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk a subtree rooted at the same path in both trees being iterated. For
|
||||||
|
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
|
||||||
|
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
|
||||||
|
// Register these nodes with the return trees, unless we're still at the
|
||||||
|
// (already-created) roots:
|
||||||
|
if path != "/" {
|
||||||
|
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
is1Dir := i1 != nil && i1.IsDir()
|
||||||
|
is2Dir := i2 != nil && i2.IsDir()
|
||||||
|
|
||||||
|
sameDevice := false
|
||||||
|
if i1 != nil && i2 != nil {
|
||||||
|
si1 := i1.Sys().(*syscall.Stat_t)
|
||||||
|
si2 := i2.Sys().(*syscall.Stat_t)
|
||||||
|
if si1.Dev == si2.Dev {
|
||||||
|
sameDevice = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If these files are both non-existent, or leaves (non-dirs), we are done.
|
||||||
|
if !is1Dir && !is2Dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the names of all the files contained in both directories being walked:
|
||||||
|
var names1, names2 []nameIno
|
||||||
|
if is1Dir {
|
||||||
|
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if is2Dir {
|
||||||
|
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have lists of the files contained in both parallel directories, sorted
|
||||||
|
// in the same order. Walk them in parallel, generating a unique merged list
|
||||||
|
// of all items present in either or both directories.
|
||||||
|
var names []string
|
||||||
|
ix1 := 0
|
||||||
|
ix2 := 0
|
||||||
|
|
||||||
|
for {
|
||||||
|
if ix1 >= len(names1) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if ix2 >= len(names2) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
ni1 := names1[ix1]
|
||||||
|
ni2 := names2[ix2]
|
||||||
|
|
||||||
|
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
|
||||||
|
case -1: // ni1 < ni2 -- advance ni1
|
||||||
|
// we will not encounter ni1 in names2
|
||||||
|
names = append(names, ni1.name)
|
||||||
|
ix1++
|
||||||
|
case 0: // ni1 == ni2
|
||||||
|
if ni1.ino != ni2.ino || !sameDevice {
|
||||||
|
names = append(names, ni1.name)
|
||||||
|
}
|
||||||
|
ix1++
|
||||||
|
ix2++
|
||||||
|
case 1: // ni1 > ni2 -- advance ni2
|
||||||
|
// we will not encounter ni2 in names1
|
||||||
|
names = append(names, ni2.name)
|
||||||
|
ix2++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for ix1 < len(names1) {
|
||||||
|
names = append(names, names1[ix1].name)
|
||||||
|
ix1++
|
||||||
|
}
|
||||||
|
for ix2 < len(names2) {
|
||||||
|
names = append(names, names2[ix2].name)
|
||||||
|
ix2++
|
||||||
|
}
|
||||||
|
|
||||||
|
// For each of the names present in either or both of the directories being
|
||||||
|
// iterated, stat the name under each root, and recurse the pair of them:
|
||||||
|
for _, name := range names {
|
||||||
|
fname := filepath.Join(path, name)
|
||||||
|
var cInfo1, cInfo2 os.FileInfo
|
||||||
|
if is1Dir {
|
||||||
|
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if is2Dir {
|
||||||
|
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
||||||
|
type nameIno struct {
|
||||||
|
name string
|
||||||
|
ino uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type nameInoSlice []nameIno
|
||||||
|
|
||||||
|
func (s nameInoSlice) Len() int { return len(s) }
|
||||||
|
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
||||||
|
|
||||||
|
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
|
||||||
|
// numbers further up the stack when reading directory contents. Unlike
|
||||||
|
// os.Readdirnames, which returns a list of filenames, this function returns a
|
||||||
|
// list of {filename,inode} pairs.
|
||||||
|
func readdirnames(dirname string) (names []nameIno, err error) {
|
||||||
|
var (
|
||||||
|
size = 100
|
||||||
|
buf = make([]byte, 4096)
|
||||||
|
nbuf int
|
||||||
|
bufp int
|
||||||
|
nb int
|
||||||
|
)
|
||||||
|
|
||||||
|
f, err := os.Open(dirname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
names = make([]nameIno, 0, size) // Empty with room to grow.
|
||||||
|
for {
|
||||||
|
// Refill the buffer if necessary
|
||||||
|
if bufp >= nbuf {
|
||||||
|
bufp = 0
|
||||||
|
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
||||||
|
if nbuf < 0 {
|
||||||
|
nbuf = 0
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, os.NewSyscallError("readdirent", err)
|
||||||
|
}
|
||||||
|
if nbuf <= 0 {
|
||||||
|
break // EOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drain the buffer
|
||||||
|
nb, names = parseDirent(buf[bufp:nbuf], names)
|
||||||
|
bufp += nb
|
||||||
|
}
|
||||||
|
|
||||||
|
sl := nameInoSlice(names)
|
||||||
|
sort.Sort(sl)
|
||||||
|
return sl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
|
||||||
|
// which returns {name,inode} pairs instead of just names.
|
||||||
|
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
||||||
|
origlen := len(buf)
|
||||||
|
for len(buf) > 0 {
|
||||||
|
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
||||||
|
buf = buf[dirent.Reclen:]
|
||||||
|
if dirent.Ino == 0 { // File absent in directory.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||||
|
var name = string(bytes[0:clen(bytes[:])])
|
||||||
|
if name == "." || name == ".." { // Useless names
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
names = append(names, nameIno{name, dirent.Ino})
|
||||||
|
}
|
||||||
|
return origlen - len(buf), names
|
||||||
|
}
|
||||||
|
|
||||||
|
func clen(n []byte) int {
|
||||||
|
for i := 0; i < len(n); i++ {
|
||||||
|
if n[i] == 0 {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(n)
|
||||||
|
}
|
97 vendor/github.com/docker/docker/pkg/archive/changes_other.go generated vendored Normal file
@ -0,0 +1,97 @@
// +build !linux

package archive

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
36 vendor/github.com/docker/docker/pkg/archive/changes_unix.go generated vendored Normal file
@ -0,0 +1,36 @@
// +build !windows

package archive

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, its not a good measure of change
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size for dirs, its not a good measure of change
		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
30 vendor/github.com/docker/docker/pkg/archive/changes_windows.go generated vendored Normal file
@ -0,0 +1,30 @@
package archive

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {

	// Don't look at size for dirs, its not a good measure of change
	if oldStat.ModTime() != newStat.ModTime() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
458 vendor/github.com/docker/docker/pkg/archive/copy.go generated vendored Normal file
@ -0,0 +1,458 @@
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errors used or returned by this file.
|
||||||
|
var (
|
||||||
|
ErrNotDirectory = errors.New("not a directory")
|
||||||
|
ErrDirNotExists = errors.New("no such directory")
|
||||||
|
ErrCannotCopyDir = errors.New("cannot copy directory")
|
||||||
|
ErrInvalidCopySource = errors.New("invalid copy source content")
|
||||||
|
)
|
||||||
|
|
||||||
|
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||||
|
// processing using any utility functions from the path or filepath stdlib
|
||||||
|
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||||
|
// path (from before being processed by utility functions from the path or
|
||||||
|
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||||
|
// path already ends in a `.` path segment, then another is not added. If the
|
||||||
|
// clean path already ends in a path separator, then another is not added.
|
||||||
|
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||||
|
// Ensure paths are in platform semantics
|
||||||
|
cleanedPath = normalizePath(cleanedPath)
|
||||||
|
originalPath = normalizePath(originalPath)
|
||||||
|
|
||||||
|
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||||
|
if !hasTrailingPathSeparator(cleanedPath) {
|
||||||
|
// Add a separator if it doesn't already end with one (a cleaned
|
||||||
|
// path would only end in a separator if it is the root).
|
||||||
|
cleanedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
cleanedPath += "."
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
||||||
|
cleanedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cleanedPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertsDirectory returns whether the given path is
|
||||||
|
// asserted to be a directory, i.e., the path ends with
|
||||||
|
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||||
|
func assertsDirectory(path string) bool {
|
||||||
|
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasTrailingPathSeparator returns whether the given
|
||||||
|
// path ends with the system's path separator character.
|
||||||
|
func hasTrailingPathSeparator(path string) bool {
|
||||||
|
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// specifiesCurrentDir returns whether the given path specifies
|
||||||
|
// a "current directory", i.e., the last path segment is `.`.
|
||||||
|
func specifiesCurrentDir(path string) bool {
|
||||||
|
return filepath.Base(path) == "."
|
||||||
|
}
|
||||||
|
|
||||||
|
// SplitPathDirEntry splits the given path between its directory name and its
|
||||||
|
// basename by first cleaning the path but preserves a trailing "." if the
|
||||||
|
// original path specified the current directory.
|
||||||
|
func SplitPathDirEntry(path string) (dir, base string) {
|
||||||
|
cleanedPath := filepath.Clean(normalizePath(path))
|
||||||
|
|
||||||
|
if specifiesCurrentDir(path) {
|
||||||
|
cleanedPath += string(filepath.Separator) + "."
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||||
|
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||||
|
// asserted to be a directory but exists as another type of file.
|
||||||
|
//
|
||||||
|
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||||
|
// requires a directory as the source path. TarResource accepts either a
|
||||||
|
// directory or a file path and correctly sets the Tar options.
|
||||||
|
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
|
||||||
|
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TarResourceRebase is like TarResource but renames the first path element of
|
||||||
|
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||||
|
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
|
||||||
|
sourcePath = normalizePath(sourcePath)
|
||||||
|
if _, err = os.Lstat(sourcePath); err != nil {
|
||||||
|
// Catches the case where the source does not exist or is not a
|
||||||
|
// directory if asserted to be a directory, as this also causes an
|
||||||
|
// error.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Separate the source path between it's directory and
|
||||||
|
// the entry in that directory which we are archiving.
|
||||||
|
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||||
|
|
||||||
|
filter := []string{sourceBase}
|
||||||
|
|
||||||
|
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||||
|
|
||||||
|
return TarWithOptions(sourceDir, &TarOptions{
|
||||||
|
Compression: Uncompressed,
|
||||||
|
IncludeFiles: filter,
|
||||||
|
IncludeSourceDir: true,
|
||||||
|
RebaseNames: map[string]string{
|
||||||
|
sourceBase: rebaseName,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyInfo holds basic info about the source
|
||||||
|
// or destination path of a copy operation.
|
||||||
|
type CopyInfo struct {
|
||||||
|
Path string
|
||||||
|
Exists bool
|
||||||
|
IsDir bool
|
||||||
|
RebaseName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||||
|
// struct representing that resource for the source of an archive copy
|
||||||
|
// operation. The given path should be an absolute local path. A source path
|
||||||
|
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||||
|
// on Unix). As it is to be a copy source, the path must exist.
|
||||||
|
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
||||||
|
// normalize the file path and then evaluate the symbol link
|
||||||
|
// we will use the target file instead of the symbol link if
|
||||||
|
// followLink is set
|
||||||
|
path = normalizePath(path)
|
||||||
|
|
||||||
|
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stat, err := os.Lstat(resolvedPath)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return CopyInfo{
|
||||||
|
Path: resolvedPath,
|
||||||
|
Exists: true,
|
||||||
|
IsDir: stat.IsDir(),
|
||||||
|
RebaseName: rebaseName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyInfoDestinationPath stats the given path to create a CopyInfo
|
||||||
|
// struct representing that resource for the destination of an archive copy
|
||||||
|
// operation. The given path should be an absolute local path.
|
||||||
|
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
||||||
|
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
|
||||||
|
path = normalizePath(path)
|
||||||
|
originalPath := path
|
||||||
|
|
||||||
|
stat, err := os.Lstat(path)
|
||||||
|
|
||||||
|
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
|
||||||
|
// The path exists and is not a symlink.
|
||||||
|
return CopyInfo{
|
||||||
|
Path: path,
|
||||||
|
Exists: true,
|
||||||
|
IsDir: stat.IsDir(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// While the path is a symlink.
|
||||||
|
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
|
||||||
|
if n > maxSymlinkIter {
|
||||||
|
// Don't follow symlinks more than this arbitrary number of times.
|
||||||
|
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The path is a symbolic link. We need to evaluate it so that the
|
||||||
|
// destination of the copy operation is the link target and not the
|
||||||
|
// link itself. This is notably different than CopyInfoSourcePath which
|
||||||
|
// only evaluates symlinks before the last appearing path separator.
|
||||||
|
// Also note that it is okay if the last path element is a broken
|
||||||
|
// symlink as the copy operation should create the target.
|
||||||
|
var linkTarget string
|
||||||
|
|
||||||
|
linkTarget, err = os.Readlink(path)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !system.IsAbs(linkTarget) {
|
||||||
|
// Join with the parent directory.
|
||||||
|
dstParent, _ := SplitPathDirEntry(path)
|
||||||
|
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||||
|
}
|
||||||
|
|
||||||
|
path = linkTarget
|
||||||
|
stat, err = os.Lstat(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// It's okay if the destination path doesn't exist. We can still
|
||||||
|
// continue the copy operation if the parent directory exists.
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure destination parent dir exists.
|
||||||
|
dstParent, _ := SplitPathDirEntry(path)
|
||||||
|
|
||||||
|
parentDirStat, err := os.Lstat(dstParent)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
if !parentDirStat.IsDir() {
|
||||||
|
return CopyInfo{}, ErrNotDirectory
|
||||||
|
}
|
||||||
|
|
||||||
|
return CopyInfo{Path: path}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The path exists after resolving symlinks.
|
||||||
|
return CopyInfo{
|
||||||
|
Path: path,
|
||||||
|
Exists: true,
|
||||||
|
IsDir: stat.IsDir(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
||||||
|
// contain the archived resource described by srcInfo, to the destination
|
||||||
|
// described by dstInfo. Returns the possibly modified content archive along
|
||||||
|
// with the path to the destination directory which it should be extracted to.
|
||||||
|
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
||||||
|
// Ensure in platform semantics
|
||||||
|
srcInfo.Path = normalizePath(srcInfo.Path)
|
||||||
|
dstInfo.Path = normalizePath(dstInfo.Path)
|
||||||
|
|
||||||
|
// Separate the destination path between its directory and base
|
||||||
|
// components in case the source archive contents need to be rebased.
|
||||||
|
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
||||||
|
_, srcBase := SplitPathDirEntry(srcInfo.Path)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case dstInfo.Exists && dstInfo.IsDir:
|
||||||
|
// The destination exists as a directory. No alteration
|
||||||
|
// to srcContent is needed as its contents can be
|
||||||
|
// simply extracted to the destination directory.
|
||||||
|
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
|
||||||
|
case dstInfo.Exists && srcInfo.IsDir:
|
||||||
|
// The destination exists as some type of file and the source
|
||||||
|
// content is a directory. This is an error condition since
|
||||||
|
// you cannot copy a directory to an existing file location.
|
||||||
|
return "", nil, ErrCannotCopyDir
|
||||||
|
case dstInfo.Exists:
|
||||||
|
// The destination exists as some type of file and the source content
|
||||||
|
// is also a file. The source content entry will have to be renamed to
|
||||||
|
// have a basename which matches the destination path's basename.
|
||||||
|
if len(srcInfo.RebaseName) != 0 {
|
||||||
|
srcBase = srcInfo.RebaseName
|
||||||
|
}
|
||||||
|
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||||
|
case srcInfo.IsDir:
|
||||||
|
// The destination does not exist and the source content is an archive
|
||||||
|
// of a directory. The archive should be extracted to the parent of
|
||||||
|
// the destination path instead, and when it is, the directory that is
|
||||||
|
// created as a result should take the name of the destination path.
|
||||||
|
// The source content entries will have to be renamed to have a
|
||||||
|
// basename which matches the destination path's basename.
|
||||||
|
if len(srcInfo.RebaseName) != 0 {
|
||||||
|
srcBase = srcInfo.RebaseName
|
||||||
|
}
|
||||||
|
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||||
|
case assertsDirectory(dstInfo.Path):
|
||||||
|
// The destination does not exist and is asserted to be created as a
|
||||||
|
// directory, but the source content is not a directory. This is an
|
||||||
|
// error condition since you cannot create a directory from a file
|
||||||
|
// source.
|
||||||
|
return "", nil, ErrDirNotExists
|
||||||
|
default:
|
||||||
|
// The last remaining case is when the destination does not exist, is
|
||||||
|
// not asserted to be a directory, and the source content is not an
|
||||||
|
// archive of a directory. It this case, the destination file will need
|
||||||
|
// to be created when the archive is extracted and the source content
|
||||||
|
// entry will have to be renamed to have a basename which matches the
|
||||||
|
// destination path's basename.
|
||||||
|
if len(srcInfo.RebaseName) != 0 {
|
||||||
|
srcBase = srcInfo.RebaseName
|
||||||
|
}
|
||||||
|
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||||
|
// an occurrence of oldBase with newBase at the beginning of entry names.
|
||||||
|
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
|
||||||
|
if oldBase == string(os.PathSeparator) {
|
||||||
|
// If oldBase specifies the root directory, use an empty string as
|
||||||
|
// oldBase instead so that newBase doesn't replace the path separator
|
||||||
|
// that all paths will start with.
|
||||||
|
oldBase = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
rebased, w := io.Pipe()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
srcTar := tar.NewReader(srcContent)
|
||||||
|
rebasedTar := tar.NewWriter(w)
|
||||||
|
|
||||||
|
for {
|
||||||
|
hdr, err := srcTar.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
// Signals end of archive.
|
||||||
|
rebasedTar.Close()
|
||||||
|
w.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
w.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
||||||
|
|
||||||
|
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
||||||
|
w.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
|
||||||
|
w.CloseWithError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return rebased
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyResource performs an archive copy from the given source path to the
|
||||||
|
// given destination path. The source path MUST exist and the destination
|
||||||
|
// path's parent directory must exist.
|
||||||
|
func CopyResource(srcPath, dstPath string, followLink bool) error {
|
||||||
|
var (
|
||||||
|
srcInfo CopyInfo
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ensure in platform semantics
|
||||||
|
srcPath = normalizePath(srcPath)
|
||||||
|
dstPath = normalizePath(dstPath)
|
||||||
|
|
||||||
|
// Clean the source and destination paths.
|
||||||
|
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
||||||
|
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
||||||
|
|
||||||
|
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := TarResource(srcInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer content.Close()
|
||||||
|
|
||||||
|
return CopyTo(content, srcInfo, dstPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyTo handles extracting the given content whose
|
||||||
|
// entries should be sourced from srcInfo to dstPath.
|
||||||
|
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
|
||||||
|
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||||
|
// ensure that at least the parent directory exists.
|
||||||
|
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer copyArchive.Close()
|
||||||
|
|
||||||
|
options := &TarOptions{
|
||||||
|
NoLchown: true,
|
||||||
|
NoOverwriteDirNonDir: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return Untar(copyArchive, dstDir, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolveHostSourcePath decides real path need to be copied with parameters such as
|
||||||
|
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
|
||||||
|
// link target of any symbol link file, else it will only resolve symlink of directory
|
||||||
|
// but return symbol link file itself without resolving.
|
||||||
|
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
|
||||||
|
if followLink {
|
||||||
|
resolvedPath, err = filepath.EvalSymlinks(path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
|
||||||
|
} else {
|
||||||
|
dirPath, basePath := filepath.Split(path)
|
||||||
|
|
||||||
|
// if not follow symbol link, then resolve symbol link of parent dir
|
||||||
|
var resolvedDirPath string
|
||||||
|
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||||
|
// we can manually join it with the base path element.
|
||||||
|
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||||
|
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||||
|
rebaseName = filepath.Base(path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resolvedPath, rebaseName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRebaseName normalizes and compares path and resolvedPath,
|
||||||
|
// return completed resolved path and rebased file name
|
||||||
|
func GetRebaseName(path, resolvedPath string) (string, string) {
|
||||||
|
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
||||||
|
// we can manually join it with them
|
||||||
|
var rebaseName string
|
||||||
|
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
|
||||||
|
resolvedPath += string(filepath.Separator) + "."
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
|
||||||
|
resolvedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||||
|
// In the case where the path had a trailing separator and a symlink
|
||||||
|
// evaluation has changed the last path component, we will need to
|
||||||
|
// rebase the name in the archive that is being copied to match the
|
||||||
|
// originally requested name.
|
||||||
|
rebaseName = filepath.Base(path)
|
||||||
|
}
|
||||||
|
return resolvedPath, rebaseName
|
||||||
|
}
|
11 vendor/github.com/docker/docker/pkg/archive/copy_unix.go generated vendored Normal file
@ -0,0 +1,11 @@
// +build !windows

package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
9 vendor/github.com/docker/docker/pkg/archive/copy_windows.go generated vendored Normal file
@ -0,0 +1,9 @@
package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
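copy.go above exposes CopyResource as the high-level entry point, with these per-platform normalizePath helpers feeding it. A minimal usage sketch, assuming the vendored import path; the source and destination paths are placeholders chosen for illustration:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy a file from the host into an extracted rootfs. The trailing
	// separator asserts the destination is a directory; the final argument
	// asks CopyResource to follow a symlinked source.
	if err := archive.CopyResource("/etc/resolv.conf", "/tmp/rootfs/etc/", true); err != nil {
		log.Fatal(err)
	}
}
```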
279 vendor/github.com/docker/docker/pkg/archive/diff.go generated vendored Normal file
@ -0,0 +1,279 @@
@ -0,0 +1,279 @@
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/docker/pkg/idtools"
|
||||||
|
"github.com/docker/docker/pkg/pools"
|
||||||
|
"github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
||||||
|
// compressed or uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
|
||||||
|
tr := tar.NewReader(layer)
|
||||||
|
trBuf := pools.BufioReader32KPool.Get(tr)
|
||||||
|
defer pools.BufioReader32KPool.Put(trBuf)
|
||||||
|
|
||||||
|
var dirs []*tar.Header
|
||||||
|
unpackedPaths := make(map[string]struct{})
|
||||||
|
|
||||||
|
if options == nil {
|
||||||
|
options = &TarOptions{}
|
||||||
|
}
|
||||||
|
if options.ExcludePatterns == nil {
|
||||||
|
options.ExcludePatterns = []string{}
|
||||||
|
}
|
||||||
|
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
aufsTempdir := ""
|
||||||
|
aufsHardlinks := make(map[string]*tar.Header)
|
||||||
|
|
||||||
|
if options == nil {
|
||||||
|
options = &TarOptions{}
|
||||||
|
}
|
||||||
|
// Iterate through the files in the archive.
|
||||||
|
for {
|
||||||
|
hdr, err := tr.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
// end of tar archive
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
size += hdr.Size
|
||||||
|
|
||||||
|
// Normalize name, for safety and for a simple is-root check
|
||||||
|
hdr.Name = filepath.Clean(hdr.Name)
|
||||||
|
|
||||||
|
// Windows does not support filenames with colons in them. Ignore
|
||||||
|
// these files. This is not a problem though (although it might
|
||||||
|
// appear that it is). Let's suppose a client is running docker pull.
|
||||||
|
// The daemon it points to is Windows. Would it make sense for the
|
||||||
|
// client to be doing a docker pull Ubuntu for example (which has files
|
||||||
|
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
||||||
|
// not as it would really only make sense that they were pulling a
|
||||||
|
// Windows image. However, for development, it is necessary to be able
|
||||||
|
// to pull Linux images which are in the repository.
|
||||||
|
//
|
||||||
|
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||||
|
// specific or Linux-specific, this warning should be changed to an error
|
||||||
|
// to cater for the situation where someone does manage to upload a Linux
|
||||||
|
// image but have it tagged as Windows inadvertently.
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if strings.Contains(hdr.Name, ":") {
|
||||||
|
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note as these operations are platform specific, so must the slash be.
|
||||||
|
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||||
|
// Not the root directory, ensure that the parent directory exists.
|
||||||
|
// This happened in some tests where an image had a tarfile without any
|
||||||
|
// parent directories.
|
||||||
|
parent := filepath.Dir(hdr.Name)
|
||||||
|
parentPath := filepath.Join(dest, parent)
|
||||||
|
|
||||||
|
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||||
|
err = system.MkdirAll(parentPath, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip AUFS metadata dirs
|
||||||
|
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
|
||||||
|
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
|
||||||
|
// We don't want this directory, but we need the files in them so that
|
||||||
|
// such hardlinks can be resolved.
|
||||||
|
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
|
||||||
|
basename := filepath.Base(hdr.Name)
|
||||||
|
aufsHardlinks[basename] = hdr
|
||||||
|
if aufsTempdir == "" {
|
||||||
|
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(aufsTempdir)
|
||||||
|
}
|
||||||
|
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hdr.Name != WhiteoutOpaqueDir {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
path := filepath.Join(dest, hdr.Name)
|
||||||
|
rel, err := filepath.Rel(dest, path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note as these operations are platform specific, so must the slash be.
|
||||||
|
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||||
|
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
|
||||||
|
}
|
||||||
|
base := filepath.Base(path)
|
||||||
|
|
||||||
|
if strings.HasPrefix(base, WhiteoutPrefix) {
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if base == WhiteoutOpaqueDir {
|
||||||
|
_, err := os.Lstat(dir)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = nil // parent was deleted
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if path == dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, exists := unpackedPaths[path]; !exists {
|
||||||
|
err := os.RemoveAll(path)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
originalBase := base[len(WhiteoutPrefix):]
|
||||||
|
originalPath := filepath.Join(dir, originalBase)
|
||||||
|
if err := os.RemoveAll(originalPath); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If path exits we almost always just want to remove and replace it.
|
||||||
|
// The only exception is when it is a directory *and* the file from
|
||||||
|
// the layer is also a directory. Then we want to merge them (i.e.
|
||||||
|
// just apply the metadata from the layer).
|
||||||
|
if fi, err := os.Lstat(path); err == nil {
|
||||||
|
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
||||||
|
if err := os.RemoveAll(path); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
trBuf.Reset(tr)
|
||||||
|
srcData := io.Reader(trBuf)
|
||||||
|
srcHdr := hdr
|
||||||
|
|
||||||
|
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
|
||||||
|
// we manually retarget these into the temporary files we extracted them into
|
||||||
|
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
|
||||||
|
linkBasename := filepath.Base(hdr.Linkname)
|
||||||
|
srcHdr = aufsHardlinks[linkBasename]
|
||||||
|
if srcHdr == nil {
|
||||||
|
return 0, fmt.Errorf("Invalid aufs hardlink")
|
||||||
|
}
|
||||||
|
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer tmpFile.Close()
|
||||||
|
srcData = tmpFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the options contain a uid & gid maps, convert header uid/gid
|
||||||
|
// entries using the maps such that lchown sets the proper mapped
|
||||||
|
// uid/gid after writing the file. We only perform this mapping if
|
||||||
|
// the file isn't already owned by the remapped root UID or GID, as
|
||||||
|
// that specific uid/gid has no mapping from container -> host, and
|
||||||
|
// those files already have the proper ownership for inside the
|
||||||
|
// container.
|
||||||
|
if srcHdr.Uid != remappedRootUID {
|
||||||
|
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
srcHdr.Uid = xUID
|
||||||
|
}
|
||||||
|
if srcHdr.Gid != remappedRootGID {
|
||||||
|
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
srcHdr.Gid = xGID
|
||||||
|
}
|
||||||
|
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directory mtimes must be handled at the end to avoid further
|
||||||
|
// file creation in them to modify the directory mtime
|
||||||
|
if hdr.Typeflag == tar.TypeDir {
|
||||||
|
dirs = append(dirs, hdr)
|
||||||
|
}
|
||||||
|
unpackedPaths[path] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hdr := range dirs {
|
||||||
|
path := filepath.Join(dest, hdr.Name)
|
||||||
|
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||||
|
// and applies it to the directory `dest`. The stream `layer` can be
|
||||||
|
// compressed or uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyLayer(dest string, layer Reader) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||||
|
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||||
|
// can only be uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, options, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||||
|
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||||
|
dest = filepath.Clean(dest)
|
||||||
|
|
||||||
|
// We need to be able to set any perms
|
||||||
|
oldmask, err := system.Umask(0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
||||||
|
|
||||||
|
if decompress {
|
||||||
|
layer, err = DecompressStream(layer)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return UnpackLayer(dest, layer, options)
|
||||||
|
}
|
97 vendor/github.com/docker/docker/pkg/archive/example_changes.go generated vendored Normal file
@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/archive"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
16 vendor/github.com/docker/docker/pkg/archive/time_linux.go generated vendored Normal file
@ -0,0 +1,16 @@
package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
16 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go generated vendored Normal file
@ -0,0 +1,16 @@
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||||
|
nsec := int64(0)
|
||||||
|
if !time.IsZero() {
|
||||||
|
nsec = time.UnixNano()
|
||||||
|
}
|
||||||
|
return syscall.NsecToTimespec(nsec)
|
||||||
|
}
|
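A small illustrative sketch (not part of this commit): the Linux variant above encodes a zero time.Time as the special UTIME_OMIT nanosecond value ((1 << 30) - 2), which tells utimensat(2) to leave that timestamp untouched rather than setting it to the epoch.

package main

import (
	"fmt"
	"time"
)

func main() {
	// A zero time.Time is the sentinel the vendored helper checks for;
	// 1073741822 is the UTIME_OMIT value it stores in Nsec in that case.
	var zero time.Time
	fmt.Println(zero.IsZero(), (1<<30)-2) // true 1073741822
}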
23 vendor/github.com/docker/docker/pkg/archive/whiteouts.go generated vendored Normal file
@@ -0,0 +1,23 @@
package archive

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
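A minimal sketch (not part of this commit) of how these constants turn into on-disk names in an exported layer; the "etc/motd" path is only an example.

package main

import "fmt"

const (
	WhiteoutPrefix     = ".wh."
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
	WhiteoutOpaqueDir  = WhiteoutMetaPrefix + ".opq"
)

func main() {
	// A file deleted in an upper layer appears in the exported tar as a
	// whiteout entry next to where the file used to live.
	deleted := "etc/" + WhiteoutPrefix + "motd"
	fmt.Println(deleted)           // etc/.wh.motd
	fmt.Println(WhiteoutOpaqueDir) // .wh..wh..opq marks a directory as opaque
}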
59 vendor/github.com/docker/docker/pkg/archive/wrap.go generated vendored Normal file
@@ -0,0 +1,59 @@
package archive

import (
	"archive/tar"
	"bytes"
	"io/ioutil"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./empty with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return ioutil.NopCloser(buf), nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
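A hedged usage sketch (not part of this commit) for the Generate helper above, assuming the vendored import path used in this tree and that Archive is an io.ReadCloser over an uncompressed tar stream:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Three arguments form one complete pair plus an incomplete one, so
	// "empty.txt" is created with empty content.
	rdr, err := archive.Generate("foo.txt", "hello world", "empty.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()

	// Write the resulting tar stream to a file.
	f, err := os.Create("demo.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rdr); err != nil {
		log.Fatal(err)
	}
}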
191 vendor/github.com/docker/docker/pkg/fileutils/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2013-2016 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
283 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go generated vendored Normal file
@@ -0,0 +1,283 @@
package fileutils

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/Sirupsen/logrus"
)

// exclusion return true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
	return pattern[0] == '!'
}

// empty return true if the specified pattern is empty
func empty(pattern string) bool {
	return pattern == ""
}

// CleanPatterns takes a slice of patterns returns a new
// slice of patterns cleaned with filepath.Clean, stripped
// of any empty patterns and lets the caller know whether the
// slice contains any exception patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
	// Loop over exclusion patterns and:
	// 1. Clean them up.
	// 2. Indicate whether we are dealing with any exception rules.
	// 3. Error if we see a single exclusion marker on it's own (!).
	cleanedPatterns := []string{}
	patternDirs := [][]string{}
	exceptions := false
	for _, pattern := range patterns {
		// Eliminate leading and trailing whitespace.
		pattern = strings.TrimSpace(pattern)
		if empty(pattern) {
			continue
		}
		if exclusion(pattern) {
			if len(pattern) == 1 {
				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
			}
			exceptions = true
		}
		pattern = filepath.Clean(pattern)
		cleanedPatterns = append(cleanedPatterns, pattern)
		if exclusion(pattern) {
			pattern = pattern[1:]
		}
		patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
	}

	return cleanedPatterns, patternDirs, exceptions, nil
}

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	patterns, patDirs, _, err := CleanPatterns(patterns)
	if err != nil {
		return false, err
	}

	return OptimizedMatches(file, patterns, patDirs)
}

// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for i, pattern := range patterns {
		negative := false

		if exclusion(pattern) {
			negative = true
			pattern = pattern[1:]
		}

		match, err := regexpMatch(pattern, file)
		if err != nil {
			return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(patDirs[i]) <= len(parentPathDirs) {
				match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
					strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}

// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in golangs filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
	regStr := "^"

	// Do some syntax checking on the pattern.
	// filepath's Match() has some really weird rules that are inconsistent
	// so instead of trying to dup their logic, just call Match() for its
	// error state and if there is an error in the pattern return it.
	// If this becomes an issue we can remove this since its really only
	// needed in the error (syntax) case - which isn't really critical.
	if _, err := filepath.Match(pattern, path); err != nil {
		return false, err
	}

	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
				}

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if strings.Index(".$", string(ch)) != -1 {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	res, err := regexp.MatchString(regStr, path)

	// Map regexp's error to filepath's so no one knows we're not using filepath
	if err != nil {
		err = filepath.ErrBadPattern
	}

	return res, err
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and remove
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
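A hedged usage sketch (not part of this commit) of the Matches helper above with .dockerignore-style patterns, including a "**" wildcard and a "!" exception rule; the file names and patterns are only examples.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude all markdown files under docs, but keep docs/README.md via
	// a later exception pattern.
	patterns := []string{"docs/**/*.md", "!docs/README.md"}

	for _, f := range []string{"docs/guide/intro.md", "docs/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-22s excluded=%v\n", f, excluded)
	}
	// docs/guide/intro.md excluded=true
	// docs/README.md      excluded=false
	// main.go             excluded=false
}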
22 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go generated vendored Normal file
@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/Sirupsen/logrus"
)

// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
7 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go generated vendored Normal file
@@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
191 vendor/github.com/docker/docker/pkg/idtools/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
[Apache License, Version 2.0, January 2004; Copyright 2013-2016 Docker, Inc. The text is identical to the vendor/github.com/docker/docker/pkg/fileutils/LICENSE file above.]
197 vendor/github.com/docker/docker/pkg/idtools/idtools.go generated vendored Normal file
@@ -0,0 +1,197 @@
package idtools

import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strconv"
	"strings"
)

// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
	ContainerID int `json:"container_id"`
	HostID      int `json:"host_id"`
	Size        int `json:"size"`
}

type subIDRange struct {
	Start  int
	Length int
}

type ranges []subIDRange

func (e ranges) Len() int           { return len(e) }
func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }

const (
	subuidFileName string = "/etc/subuid"
	subgidFileName string = "/etc/subgid"
)

// MkdirAllAs creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}

// MkdirAllNewAs creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}

// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}

// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
	var uid, gid int

	if uidMap != nil {
		xUID, err := ToHost(0, uidMap)
		if err != nil {
			return -1, -1, err
		}
		uid = xUID
	}
	if gidMap != nil {
		xGID, err := ToHost(0, gidMap)
		if err != nil {
			return -1, -1, err
		}
		gid = xGID
	}
	return uid, gid, nil
}

// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToContainer(hostID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return hostID, nil
	}
	for _, m := range idMap {
		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
			contID := m.ContainerID + (hostID - m.HostID)
			return contID, nil
		}
	}
	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}

// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func ToHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil
	}
	for _, m := range idMap {
		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
			hostID := m.HostID + (contID - m.ContainerID)
			return hostID, nil
		}
	}
	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}

// CreateIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
	subuidRanges, err := parseSubuid(username)
	if err != nil {
		return nil, nil, err
	}
	subgidRanges, err := parseSubgid(groupname)
	if err != nil {
		return nil, nil, err
	}
	if len(subuidRanges) == 0 {
		return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
	}
	if len(subgidRanges) == 0 {
		return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
	}

	return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}

func createIDMap(subidRanges ranges) []IDMap {
	idMap := []IDMap{}

	// sort the ranges by lowest ID first
	sort.Sort(subidRanges)
	containerID := 0
	for _, idrange := range subidRanges {
		idMap = append(idMap, IDMap{
			ContainerID: containerID,
			HostID:      idrange.Start,
			Size:        idrange.Length,
		})
		containerID = containerID + idrange.Length
	}
	return idMap
}

func parseSubuid(username string) (ranges, error) {
	return parseSubidFile(subuidFileName, username)
}

func parseSubgid(username string) (ranges, error) {
	return parseSubidFile(subgidFileName, username)
}

// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
	var rangeList ranges

	subidFile, err := os.Open(path)
	if err != nil {
		return rangeList, err
	}
	defer subidFile.Close()

	s := bufio.NewScanner(subidFile)
	for s.Scan() {
		if err := s.Err(); err != nil {
			return rangeList, err
		}

		text := strings.TrimSpace(s.Text())
		if text == "" || strings.HasPrefix(text, "#") {
			continue
		}
		parts := strings.Split(text, ":")
		if len(parts) != 3 {
			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
		}
		if parts[0] == username || username == "ALL" {
			startid, err := strconv.Atoi(parts[1])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			length, err := strconv.Atoi(parts[2])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			rangeList = append(rangeList, subIDRange{startid, length})
		}
	}
	return rangeList, nil
}
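A hedged usage sketch (not part of this commit) of the ToHost/ToContainer translation above; the single remapping range is only an example.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remapping range: container IDs 0-65535 live at host IDs 100000-165535.
	idMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	host, err := idtools.ToHost(0, idMap) // container root -> host uid
	if err != nil {
		log.Fatal(err)
	}
	cont, err := idtools.ToContainer(100123, idMap) // host uid -> in-container uid
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host, cont) // 100000 123
}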
60 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go generated vendored Normal file
@@ -0,0 +1,60 @@
// +build !windows

package idtools

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/system"
)

func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	// make an array containing the original path asked for, plus (for mkAll == true)
	// all path components leading up to the complete path that don't exist before we MkdirAll
	// so that we can chown all of them properly at the end. If chownExisting is false, we won't
	// chown the full directory path if it exists
	var paths []string
	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
		paths = []string{path}
	} else if err == nil && chownExisting {
		if err := os.Chown(path, ownerUID, ownerGID); err != nil {
			return err
		}
		// short-circuit--we were called with an existing directory and chown was requested
		return nil
	} else if err == nil {
		// nothing to do; directory path fully exists already and chown was NOT requested
		return nil
	}

	if mkAll {
		// walk back to "/" looking for directories which do not exist
		// and add them to the paths array for chown after creation
		dirPath := path
		for {
			dirPath = filepath.Dir(dirPath)
			if dirPath == "/" {
				break
			}
			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
				paths = append(paths, dirPath)
			}
		}
		if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	}
	// even if it existed, we will chown the requested path + any subpaths that
	// didn't exist when we called MkdirAll
	for _, pathComponent := range paths {
		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
			return err
		}
	}
	return nil
}
18 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go generated vendored Normal file
@@ -0,0 +1,18 @@
// +build windows

package idtools

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}
188
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
generated
vendored
Normal file
188
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
|
||||||
|
// Linux distribution commands:
|
||||||
|
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
|
||||||
|
// useradd -r -s /bin/false <username>
|
||||||
|
|
||||||
|
var (
|
||||||
|
once sync.Once
|
||||||
|
userCommand string
|
||||||
|
|
||||||
|
cmdTemplates = map[string]string{
|
||||||
|
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
|
||||||
|
"useradd": "-r -s /bin/false %s",
|
||||||
|
"usermod": "-%s %d-%d %s",
|
||||||
|
}
|
||||||
|
|
||||||
|
	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	// default length for a UID/GID subordinate range
	defaultRangeLen   = 65536
	defaultRangeStart = 100000
	userMod           = "usermod"
)

func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
	if err != nil {
		return "", err
	}
	// only return no error if the final resolved binary basename
	// matches what was searched for
	if filepath.Base(resolvedPath) == binname {
		return resolvedPath, nil
	}
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
	if err := addUser(name); err != nil {
		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
	}

	// Query the system for the created uid and gid pair
	out, err := execCmd("id", name)
	if err != nil {
		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
	}
	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
	if len(matches) != 3 {
		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
	}
	uid, err := strconv.Atoi(matches[1])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
	}
	gid, err := strconv.Atoi(matches[2])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
	}

	// Now we need to create the subuid/subgid ranges for our new user/group (system users
	// do not get auto-created ranges in subuid/subgid)

	if err := createSubordinateRanges(name); err != nil {
		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
	}
	return uid, gid, nil
}

func addUser(userName string) error {
	once.Do(func() {
		// set up which commands are used for adding users/groups dependent on distro
		if _, err := resolveBinary("adduser"); err == nil {
			userCommand = "adduser"
		} else if _, err := resolveBinary("useradd"); err == nil {
			userCommand = "useradd"
		}
	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
	out, err := execCmd(userCommand, args)
	if err != nil {
		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
	}
	return nil
}

func createSubordinateRanges(name string) error {

	// first, we should verify that ranges weren't automatically created
	// by the distro tooling
	ranges, err := parseSubuid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no UID ranges; let's create one
		startID, err := findNextUIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subuid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}

	ranges, err = parseSubgid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no GID ranges; let's create one
		startID, err := findNextGIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subgid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}
	return nil
}

func findNextUIDRange() (int, error) {
	ranges, err := parseSubuid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextGIDRange() (int, error) {
	ranges, err := parseSubgid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextRangeStart(rangeList ranges) (int, error) {
	startID := defaultRangeStart
	for _, arange := range rangeList {
		if wouldOverlap(arange, startID) {
			startID = arange.Start + arange.Length
		}
	}
	return startID, nil
}

func wouldOverlap(arange subIDRange, ID int) bool {
	low := ID
	high := ID + defaultRangeLen
	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
		(high <= arange.Start+arange.Length && high >= arange.Start) {
		return true
	}
	return false
}

func execCmd(cmd, args string) ([]byte, error) {
	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
	return execCmd.CombinedOutput()
}
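To show what this vendored helper is for, here is a minimal usage sketch (it is not part of this commit); the account name "containers-remap" is a made-up example, and the call only succeeds as root on a Linux host where useradd/adduser and usermod are installed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Creates a system user/group pair and subordinate uid/gid ranges
	// in /etc/subuid and /etc/subgid by shelling out to useradd/usermod.
	uid, gid, err := idtools.AddNamespaceRangesUser("containers-remap")
	if err != nil {
		log.Fatal(err)
	}
	// The returned ids anchor the ranges used for user-namespace remapping.
	fmt.Printf("remap user created: uid=%d gid=%d\n", uid, gid)
}
```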
12
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@

// +build !linux

package idtools

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
191
vendor/github.com/docker/docker/pkg/ioutils/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@

Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2013-2016 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
51
vendor/github.com/docker/docker/pkg/ioutils/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@

package ioutils

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n

	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
180
vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@

package ioutils

import (
	"errors"
	"io"
	"sync"
)

// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64

// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

var (
	// ErrClosed is returned when Write is called on a closed BytesPipe.
	ErrClosed = errors.New("write to closed BytesPipe")

	bufPools = make(map[int]*sync.Pool)
)

// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond
	buf      []*fixedBuffer
	bufLen   int
	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
func NewBytesPipe() *BytesPipe {
	bp := &BytesPipe{}
	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()

	written := 0
loop0:
	for {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return written, ErrClosed
		}

		if len(bp.buf) == 0 {
			bp.buf = append(bp.buf, getBuffer(64))
		}
		// get the last buffer
		b := bp.buf[len(bp.buf)-1]

		n, err := b.Write(p)
		written += n
		bp.bufLen += n

		// errBufferFull is an error we expect to get if the buffer is full
		if err != nil && err != errBufferFull {
			bp.wait.Broadcast()
			bp.mu.Unlock()
			return written, err
		}

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
			if bp.closeErr != nil {
				continue loop0
			}
		}

		// add new byte slice to the buffers slice and continue writing
		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
	bp.mu.Lock()
	if err != nil {
		bp.closeErr = err
	} else {
		bp.closeErr = io.EOF
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	if bp.bufLen == 0 {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
		bp.wait.Wait()
		if bp.bufLen == 0 && bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
	}

	for bp.bufLen > 0 {
		b := bp.buf[0]
		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
		bp.bufLen -= read

		if b.Len() == 0 {
			// it's empty so return it to the pool and move to the next one
			returnBuffer(b)
			bp.buf[0] = nil
			bp.buf = bp.buf[1:]
		}

		if len(p) == read {
			break
		}

		p = p[read:]
	}

	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

func returnBuffer(b *fixedBuffer) {
	b.Reset()
	pool := bufPools[b.Cap()]
	if pool != nil {
		pool.Put(b)
	}
}

func getBuffer(size int) *fixedBuffer {
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		bufPools[size] = pool
	}
	return pool.Get().(*fixedBuffer)
}
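A short illustrative sketch (not from this commit) of BytesPipe as an in-memory pipe: one goroutine writes into it, and the reader drains the same bytes, stopping at io.EOF once Close has been called and the buffer is empty.

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		// Each Write appends to the pipe's internal buffer chain.
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // reads drain remaining data, then return io.EOF
	}()

	buf := make([]byte, 8)
	for {
		n, err := bp.Read(buf)
		fmt.Printf("read %q\n", buf[:n])
		if err == io.EOF {
			break
		}
	}
}
```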
22
vendor/github.com/docker/docker/pkg/ioutils/fmt.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@

package ioutils

import (
	"fmt"
	"io"
)

// FprintfIfNotEmpty prints the string value if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
	if value != "" {
		return fmt.Fprintf(w, format, value)
	}
	return 0, nil
}

// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
	if ok {
		return fmt.Fprintf(w, format, ok)
	}
	return 0, nil
}
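For illustration only (not part of the commit), FprintfIfNotEmpty writes the formatted value when it is non-empty and is a no-op otherwise; the field names below are made up.

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Prints "image: alpine"; the second call writes nothing at all.
	ioutils.FprintfIfNotEmpty(os.Stdout, "image: %s\n", "alpine")
	ioutils.FprintfIfNotEmpty(os.Stdout, "entrypoint: %s\n", "")
}
```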
226
vendor/github.com/docker/docker/pkg/ioutils/multireader.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@

package ioutils

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

type pos struct {
	idx    int
	offset int64
}

type multiReadSeeker struct {
	readers []io.ReadSeeker
	pos     *pos
	posIdx  map[io.ReadSeeker]int
}

func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
	var tmpOffset int64
	switch whence {
	case os.SEEK_SET:
		for i, rdr := range r.readers {
			// get size of the current reader
			s, err := rdr.Seek(0, os.SEEK_END)
			if err != nil {
				return -1, err
			}

			if offset > tmpOffset+s {
				if i == len(r.readers)-1 {
					rdrOffset := s + (offset - tmpOffset)
					if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
						return -1, err
					}
					r.pos = &pos{i, rdrOffset}
					return offset, nil
				}

				tmpOffset += s
				continue
			}

			rdrOffset := offset - tmpOffset
			idx := i

			rdr.Seek(rdrOffset, os.SEEK_SET)
			// make sure all following readers are at 0
			for _, rdr := range r.readers[i+1:] {
				rdr.Seek(0, os.SEEK_SET)
			}

			if rdrOffset == s && i != len(r.readers)-1 {
				idx++
				rdrOffset = 0
			}
			r.pos = &pos{idx, rdrOffset}
			return offset, nil
		}
	case os.SEEK_END:
		for _, rdr := range r.readers {
			s, err := rdr.Seek(0, os.SEEK_END)
			if err != nil {
				return -1, err
			}
			tmpOffset += s
		}
		r.Seek(tmpOffset+offset, os.SEEK_SET)
		return tmpOffset + offset, nil
	case os.SEEK_CUR:
		if r.pos == nil {
			return r.Seek(offset, os.SEEK_SET)
		}
		// Just return the current offset
		if offset == 0 {
			return r.getCurOffset()
		}

		curOffset, err := r.getCurOffset()
		if err != nil {
			return -1, err
		}
		rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
		if err != nil {
			return -1, err
		}

		r.pos = &pos{r.posIdx[rdr], rdrOffset}
		return curOffset + offset, nil
	default:
		return -1, fmt.Errorf("Invalid whence: %d", whence)
	}

	return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}

func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
	var rdr io.ReadSeeker
	var rdrOffset int64

	for i, rdr := range r.readers {
		offsetTo, err := r.getOffsetToReader(rdr)
		if err != nil {
			return nil, -1, err
		}
		if offsetTo > offset {
			rdr = r.readers[i-1]
			rdrOffset = offsetTo - offset
			break
		}

		if rdr == r.readers[len(r.readers)-1] {
			rdrOffset = offsetTo + offset
			break
		}
	}

	return rdr, rdrOffset, nil
}

func (r *multiReadSeeker) getCurOffset() (int64, error) {
	var totalSize int64
	for _, rdr := range r.readers[:r.pos.idx+1] {
		if r.posIdx[rdr] == r.pos.idx {
			totalSize += r.pos.offset
			break
		}

		size, err := getReadSeekerSize(rdr)
		if err != nil {
			return -1, fmt.Errorf("error getting seeker size: %v", err)
		}
		totalSize += size
	}
	return totalSize, nil
}

func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
	var offset int64
	for _, r := range r.readers {
		if r == rdr {
			break
		}

		size, err := getReadSeekerSize(rdr)
		if err != nil {
			return -1, err
		}
		offset += size
	}
	return offset, nil
}

func (r *multiReadSeeker) Read(b []byte) (int, error) {
	if r.pos == nil {
		r.pos = &pos{0, 0}
	}

	bCap := int64(cap(b))
	buf := bytes.NewBuffer(nil)
	var rdr io.ReadSeeker

	for _, rdr = range r.readers[r.pos.idx:] {
		readBytes, err := io.CopyN(buf, rdr, bCap)
		if err != nil && err != io.EOF {
			return -1, err
		}
		bCap -= readBytes

		if bCap == 0 {
			break
		}
	}

	rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}
	r.pos = &pos{r.posIdx[rdr], rdrPos}
	return buf.Read(b)
}

func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
	// save the current position
	pos, err := rdr.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}

	// get the size
	size, err := rdr.Seek(0, os.SEEK_END)
	if err != nil {
		return -1, err
	}

	// reset the position
	if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
		return -1, err
	}
	return size, nil
}

// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
	if len(readers) == 1 {
		return readers[0]
	}
	idx := make(map[io.ReadSeeker]int)
	for i, rdr := range readers {
		idx[rdr] = i
	}
	return &multiReadSeeker{
		readers: readers,
		posIdx:  idx,
	}
}
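An illustrative sketch (not part of the commit) that concatenates two in-memory readers with MultiReadSeeker and reads them back as a single stream; strings.NewReader is used here simply because it satisfies io.ReadSeeker.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// The two readers behave as one logical stream.
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("hello "),
		strings.NewReader("world"),
	)

	data, err := ioutil.ReadAll(rs)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("%s\n", data) // hello world
}
```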
154
vendor/github.com/docker/docker/pkg/ioutils/readers.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@

package ioutils

import (
	"crypto/sha256"
	"encoding/hex"
	"io"

	"golang.org/x/net/context"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps a io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

// Close closes the file and run the function.
func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
	cancel func()
	pR     *io.PipeReader // Stream to read from
	pW     *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
	pR, pW := io.Pipe()

	// Create a context used to signal when the pipe is closed
	doneCtx, cancel := context.WithCancel(context.Background())

	p := &cancelReadCloser{
		cancel: cancel,
		pR:     pR,
		pW:     pW,
	}

	go func() {
		_, err := io.Copy(pW, in)
		select {
		case <-ctx.Done():
			// If the context was closed, p.closeWithError
			// was already called. Calling it again would
			// change the error that Read returns.
		default:
			p.closeWithError(err)
		}
		in.Close()
	}()
	go func() {
		for {
			select {
			case <-ctx.Done():
				p.closeWithError(ctx.Err())
			case <-doneCtx.Done():
				return
			}
		}
	}()

	return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
	return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
	p.pW.CloseWithError(err)
	p.cancel()
}

// Close closes the wrapper its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
	p.closeWithError(io.EOF)
	return nil
}
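A small usage sketch (assumed, not from this commit): NewCancelReadCloser wraps a reader so that a context timeout or cancellation can abort reads that would otherwise block; the in-memory body below stands in for a slow network response.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"

	"github.com/docker/docker/pkg/ioutils"
	"golang.org/x/net/context"
)

func main() {
	// A fake response body; a real one could block for a long time.
	body := ioutil.NopCloser(strings.NewReader("partial response"))

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, body)
	defer rc.Close()

	// If the deadline fires before the copy finishes, Read returns the
	// context error instead of hanging.
	n, err := io.Copy(ioutil.Discard, rc)
	fmt.Println(n, err)
}
```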
10
vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@

// +build !windows

package ioutils

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
	return ioutil.TempDir(dir, prefix)
}
18
vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@

// +build windows

package ioutils

import (
	"io/ioutil"

	"github.com/docker/docker/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
	tempDir, err := ioutil.TempDir(dir, prefix)
	if err != nil {
		return "", err
	}
	return longpath.AddPrefix(tempDir), nil
}
92
vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@

package ioutils

import (
	"io"
	"sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the targets lifecycle has already ended.
type WriteFlusher struct {
	w           io.Writer
	flusher     flusher
	flushed     chan struct{}
	flushedOnce sync.Once
	closed      chan struct{}
	closeLock   sync.Mutex
}

type flusher interface {
	Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
	select {
	case <-wf.closed:
		return 0, errWriteFlusherClosed
	default:
	}

	n, err = wf.w.Write(b)
	wf.Flush() // every write is a flush.
	return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
	select {
	case <-wf.closed:
		return
	default:
	}

	wf.flushedOnce.Do(func() {
		close(wf.flushed)
	})
	wf.flusher.Flush()
}

// Flushed returns the state of flushed.
// If it's flushed, return true, or else it return false.
func (wf *WriteFlusher) Flushed() bool {
	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
	// be used to detect whether or a response code has been issued or not.
	// Another hook should be used instead.
	var flushed bool
	select {
	case <-wf.flushed:
		flushed = true
	default:
	}
	return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
	wf.closeLock.Lock()
	defer wf.closeLock.Unlock()

	select {
	case <-wf.closed:
		return errWriteFlusherClosed
	default:
		close(wf.closed)
	}
	return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
	var fl flusher
	if f, ok := w.(flusher); ok {
		fl = f
	} else {
		fl = &NopFlusher{}
	}
	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
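A hypothetical HTTP handler (not part of this commit) showing the intended use of WriteFlusher: http.ResponseWriter normally implements Flush, so each write is pushed to the client immediately, which is useful for streaming progress output.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Every Write through wf is followed by a Flush on the ResponseWriter.
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "tick %d\n", i)
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/stream", handler)
	http.ListenAndServe(":8080", nil)
}
```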
66
vendor/github.com/docker/docker/pkg/ioutils/writers.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@

package ioutils

import "io"

// NopWriter represents a type which write operation is nop.
type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
	return len(buf), nil
}

type nopWriteCloser struct {
	io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
	return &nopWriteCloser{w}
}

// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}

// Flush is a nop operation.
func (f *NopFlusher) Flush() {}

type writeCloserWrapper struct {
	io.Writer
	closer func() error
}

func (r *writeCloserWrapper) Close() error {
	return r.closer()
}

// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
	return &writeCloserWrapper{
		Writer: r,
		closer: closer,
	}
}

// WriteCounter wraps a concrete io.Writer and hold a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
	Count  int64
	Writer io.Writer
}

// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
	return &WriteCounter{
		Writer: w,
	}
}

func (wc *WriteCounter) Write(p []byte) (count int, err error) {
	count, err = wc.Writer.Write(p)
	wc.Count += int64(count)
	return
}
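An illustrative sketch of WriteCounter, which recovers the byte count that json.Encoder.Encode does not report; the payload below is a made-up example and not part of this commit.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// json.Encoder.Encode hides how many bytes were written; the counter
	// sits between the encoder and the real writer and tallies them.
	wc := ioutils.NewWriteCounter(os.Stdout)
	enc := json.NewEncoder(wc)

	if err := enc.Encode(map[string]string{"image": "alpine"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Fprintf(os.Stderr, "wrote %d bytes\n", wc.Count)
}
```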
191
vendor/github.com/docker/docker/pkg/mount/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@

Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

[standard Apache License 2.0 terms, identical to vendor/github.com/docker/docker/pkg/ioutils/LICENSE above]

END OF TERMS AND CONDITIONS

Copyright 2013-2016 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
92 vendor/github.com/docker/docker/pkg/mount/flags.go generated vendored Normal file
@ -0,0 +1,92 @@
package mount

import (
	"fmt"
	"strings"
)

// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
	var (
		flag int
		data []string
	)

	flags := map[string]struct {
		clear bool
		flag  int
	}{
		"defaults":      {false, 0},
		"ro":            {false, RDONLY},
		"rw":            {true, RDONLY},
		"suid":          {true, NOSUID},
		"nosuid":        {false, NOSUID},
		"dev":           {true, NODEV},
		"nodev":         {false, NODEV},
		"exec":          {true, NOEXEC},
		"noexec":        {false, NOEXEC},
		"sync":          {false, SYNCHRONOUS},
		"async":         {true, SYNCHRONOUS},
		"dirsync":       {false, DIRSYNC},
		"remount":       {false, REMOUNT},
		"mand":          {false, MANDLOCK},
		"nomand":        {true, MANDLOCK},
		"atime":         {true, NOATIME},
		"noatime":       {false, NOATIME},
		"diratime":      {true, NODIRATIME},
		"nodiratime":    {false, NODIRATIME},
		"bind":          {false, BIND},
		"rbind":         {false, RBIND},
		"unbindable":    {false, UNBINDABLE},
		"runbindable":   {false, RUNBINDABLE},
		"private":       {false, PRIVATE},
		"rprivate":      {false, RPRIVATE},
		"shared":        {false, SHARED},
		"rshared":       {false, RSHARED},
		"slave":         {false, SLAVE},
		"rslave":        {false, RSLAVE},
		"relatime":      {false, RELATIME},
		"norelatime":    {true, RELATIME},
		"strictatime":   {false, STRICTATIME},
		"nostrictatime": {true, STRICTATIME},
	}

	for _, o := range strings.Split(options, ",") {
		// If the option does not exist in the flags table or the flag
		// is not supported on the platform,
		// then it is a data value for a specific fs type
		if f, exists := flags[o]; exists && f.flag != 0 {
			if f.clear {
				flag &= ^f.flag
			} else {
				flag |= f.flag
			}
		} else {
			data = append(data, o)
		}
	}
	return flag, strings.Join(data, ",")
}

// ParseTmpfsOptions parse fstab type mount options into flags and data
func ParseTmpfsOptions(options string) (int, string, error) {
	flags, data := parseOptions(options)
	validFlags := map[string]bool{
		"":          true,
		"size":      true,
		"mode":      true,
		"uid":       true,
		"gid":       true,
		"nr_inodes": true,
		"nr_blocks": true,
		"mpol":      true,
	}
	for _, o := range strings.Split(data, ",") {
		opt := strings.SplitN(o, "=", 2)
		if !validFlags[opt[0]] {
			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
		}
	}
	return flags, data, nil
}
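As an illustration of the exported helper above, here is a minimal usage sketch of ParseTmpfsOptions, assuming the vendored import path github.com/docker/docker/pkg/mount and a platform where the flag constants are non-zero (Linux or FreeBSD); the option string is just an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// "ro" maps to the RDONLY flag; "size" and "mode" are accepted as tmpfs data options.
	flags, data, err := mount.ParseTmpfsOptions("ro,size=64m,mode=1777")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}
```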
48 vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go generated vendored Normal file
@ -0,0 +1,48 @@
// +build freebsd,cgo

package mount

/*
#include <sys/mount.h>
*/
import "C"

const (
	// RDONLY will mount the filesystem as read-only.
	RDONLY = C.MNT_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = C.MNT_NOSUID

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = C.MNT_NOEXEC

	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
	SYNCHRONOUS = C.MNT_SYNCHRONOUS

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = C.MNT_NOATIME
)

// These flags are unsupported.
const (
	BIND        = 0
	DIRSYNC     = 0
	MANDLOCK    = 0
	NODEV       = 0
	NODIRATIME  = 0
	UNBINDABLE  = 0
	RUNBINDABLE = 0
	PRIVATE     = 0
	RPRIVATE    = 0
	SHARED      = 0
	RSHARED     = 0
	SLAVE       = 0
	RSLAVE      = 0
	RBIND       = 0
	RELATIVE    = 0
	RELATIME    = 0
	REMOUNT     = 0
	STRICTATIME = 0
)
85 vendor/github.com/docker/docker/pkg/mount/flags_linux.go generated vendored Normal file
@ -0,0 +1,85 @@
package mount

import (
	"syscall"
)

const (
	// RDONLY will mount the file system read-only.
	RDONLY = syscall.MS_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = syscall.MS_NOSUID

	// NODEV will not interpret character or block special devices on the file
	// system.
	NODEV = syscall.MS_NODEV

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = syscall.MS_NOEXEC

	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
	SYNCHRONOUS = syscall.MS_SYNCHRONOUS

	// DIRSYNC will force all directory updates within the file system to be done
	// synchronously. This affects the following system calls: create, link,
	// unlink, symlink, mkdir, rmdir, mknod and rename.
	DIRSYNC = syscall.MS_DIRSYNC

	// REMOUNT will attempt to remount an already-mounted file system. This is
	// commonly used to change the mount flags for a file system, especially to
	// make a readonly file system writeable. It does not change device or mount
	// point.
	REMOUNT = syscall.MS_REMOUNT

	// MANDLOCK will force mandatory locks on a filesystem.
	MANDLOCK = syscall.MS_MANDLOCK

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = syscall.MS_NOATIME

	// NODIRATIME will not update the directory access time.
	NODIRATIME = syscall.MS_NODIRATIME

	// BIND remounts a subtree somewhere else.
	BIND = syscall.MS_BIND

	// RBIND remounts a subtree and all possible submounts somewhere else.
	RBIND = syscall.MS_BIND | syscall.MS_REC

	// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
	UNBINDABLE = syscall.MS_UNBINDABLE

	// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
	RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC

	// PRIVATE creates a mount which carries no propagation abilities.
	PRIVATE = syscall.MS_PRIVATE

	// RPRIVATE marks the entire mount tree as PRIVATE.
	RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC

	// SLAVE creates a mount which receives propagation from its master, but not
	// vice versa.
	SLAVE = syscall.MS_SLAVE

	// RSLAVE marks the entire mount tree as SLAVE.
	RSLAVE = syscall.MS_SLAVE | syscall.MS_REC

	// SHARED creates a mount which provides the ability to create mirrors of
	// that mount such that mounts and unmounts within any of the mirrors
	// propagate to the other mirrors.
	SHARED = syscall.MS_SHARED

	// RSHARED marks the entire mount tree as SHARED.
	RSHARED = syscall.MS_SHARED | syscall.MS_REC

	// RELATIME updates inode access times relative to modify or change time.
	RELATIME = syscall.MS_RELATIME

	// STRICTATIME allows to explicitly request full atime updates. This makes
	// it possible for the kernel to default to relatime or noatime but still
	// allow userspace to override it.
	STRICTATIME = syscall.MS_STRICTATIME
)
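Because these constants are aliases for the syscall MS_* values on Linux, they can be combined with bitwise OR exactly as the flags argument to mount(2) is built. A small sketch, assuming the vendored import path; the particular combination shown is only an example and corresponds to the option string "ro,nosuid,nodev" accepted by parseOptions above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Equivalent of the fstab-style options "ro,nosuid,nodev".
	flags := mount.RDONLY | mount.NOSUID | mount.NODEV
	fmt.Printf("combined mount flags: %#x\n", flags)
}
```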
30 vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go generated vendored Normal file
@ -0,0 +1,30 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

// These flags are unsupported.
const (
	BIND        = 0
	DIRSYNC     = 0
	MANDLOCK    = 0
	NOATIME     = 0
	NODEV       = 0
	NODIRATIME  = 0
	NOEXEC      = 0
	NOSUID      = 0
	UNBINDABLE  = 0
	RUNBINDABLE = 0
	PRIVATE     = 0
	RPRIVATE    = 0
	SHARED      = 0
	RSHARED     = 0
	SLAVE       = 0
	RSLAVE      = 0
	RBIND       = 0
	RELATIME    = 0
	RELATIVE    = 0
	REMOUNT     = 0
	STRICTATIME = 0
	SYNCHRONOUS = 0
	RDONLY      = 0
)
74 vendor/github.com/docker/docker/pkg/mount/mount.go generated vendored Normal file
@ -0,0 +1,74 @@
package mount

import (
	"time"
)

// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
	return parseMountTable()
}

// Mounted looks at /proc/self/mountinfo to determine of the specified
// mountpoint has been mounted
func Mounted(mountpoint string) (bool, error) {
	entries, err := parseMountTable()
	if err != nil {
		return false, err
	}

	// Search the table for the mountpoint
	for _, e := range entries {
		if e.Mountpoint == mountpoint {
			return true, nil
		}
	}
	return false, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
	flag, _ := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			return err
		}
	}
	return ForceMount(device, target, mType, options)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if err := mount(device, target, mType, uintptr(flag), data); err != nil {
		return err
	}
	return nil
}

// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
	if mounted, err := Mounted(target); err != nil || !mounted {
		return err
	}
	return ForceUnmount(target)
}

// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
	// Simple retry logic for unmount
	for i := 0; i < 10; i++ {
		if err = unmount(target, 0); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return
}
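A minimal usage sketch of the Mount, Mounted, and Unmount entry points added above, assuming the vendored import path github.com/docker/docker/pkg/mount; the paths are hypothetical, the target directory must already exist, and the bind mount only succeeds with sufficient privileges (CAP_SYS_ADMIN or equivalent):

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Bind-mount a directory read-only, check it, then unmount it again.
	src, dst := "/tmp/src", "/tmp/dst" // hypothetical paths

	if err := mount.Mount(src, dst, "none", "bind,ro"); err != nil {
		log.Fatalf("mount failed: %v", err)
	}
	defer func() {
		if err := mount.Unmount(dst); err != nil {
			log.Printf("unmount failed: %v", err)
		}
	}()

	mounted, err := mount.Mounted(dst)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s mounted: %v", dst, mounted)
}
```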
59 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go generated vendored Normal file
@ -0,0 +1,59 @@
package mount

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
	"fmt"
	"strings"
	"syscall"
	"unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
	out := make([]C.struct_iovec, len(options))
	for i, option := range options {
		out[i].iov_base = unsafe.Pointer(C.CString(option))
		out[i].iov_len = C.size_t(len(option) + 1)
	}
	return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
	isNullFS := false

	xs := strings.Split(data, ",")
	for _, x := range xs {
		if x == "bind" {
			isNullFS = true
		}
	}

	options := []string{"fspath", target}
	if isNullFS {
		options = append(options, "fstype", "nullfs", "target", device)
	} else {
		options = append(options, "fstype", mType, "from", device)
	}
	rawOptions := allocateIOVecs(options)
	for _, rawOption := range rawOptions {
		defer C.free(rawOption.iov_base)
	}

	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
		reason := C.GoString(C.strerror(*C.__error()))
		return fmt.Errorf("Failed to call nmount: %s", reason)
	}
	return nil
}

func unmount(target string, flag int) error {
	return syscall.Unmount(target, flag)
}
21 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go generated vendored Normal file
@ -0,0 +1,21 @@
package mount

import (
	"syscall"
)

func mount(device, target, mType string, flag uintptr, data string) error {
	if err := syscall.Mount(device, target, mType, flag, data); err != nil {
		return err
	}

	// If we have a bind mount or remount, remount...
	if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
		return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
	}
	return nil
}

func unmount(target string, flag int) error {
	return syscall.Unmount(target, flag)
}
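The second syscall.Mount call above exists because the kernel ignores flags such as MS_RDONLY when a bind mount is first created; the read-only attribute only takes effect through a follow-up remount. A standalone sketch of that two-step sequence using plain syscall, with hypothetical paths and the usual privilege requirements:

```go
package main

import (
	"log"
	"syscall"
)

func main() {
	src, dst := "/tmp/src", "/tmp/dst" // hypothetical paths

	// Step 1: create the bind mount (MS_RDONLY would be ignored here).
	if err := syscall.Mount(src, dst, "none", syscall.MS_BIND, ""); err != nil {
		log.Fatalf("bind: %v", err)
	}
	// Step 2: remount the bind mount to actually apply the read-only flag.
	if err := syscall.Mount(src, dst, "none", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil {
		log.Fatalf("remount ro: %v", err)
	}
}
```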
11 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go generated vendored Normal file
@ -0,0 +1,11 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

func mount(device, target, mType string, flag uintptr, data string) error {
	panic("Not implemented")
}

func unmount(target string, flag int) error {
	panic("Not implemented")
}
40 vendor/github.com/docker/docker/pkg/mount/mountinfo.go generated vendored Normal file
@ -0,0 +1,40 @@
package mount

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Opts represents mount-specific options.
	Opts string

	// Optional represents optional fields.
	Optional string

	// Fstype indicates the type of filesystem, such as EXT3.
	Fstype string

	// Source indicates filesystem specific information or "none".
	Source string

	// VfsOpts represents per super block options.
	VfsOpts string
}
41 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go generated vendored Normal file
@ -0,0 +1,41 @@
package mount

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable() ([]*Info, error) {
	var rawEntries *C.struct_statfs

	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
	if count == 0 {
		return nil, fmt.Errorf("Failed to call getmntinfo")
	}

	var entries []C.struct_statfs
	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
	header.Cap = count
	header.Len = count
	header.Data = uintptr(unsafe.Pointer(rawEntries))

	var out []*Info
	for _, entry := range entries {
		var mountinfo Info
		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
		out = append(out, &mountinfo)
	}
	return out, nil
}
95 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go generated vendored Normal file
@ -0,0 +1,95 @@
// +build linux

package mount

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

const (
	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
	   (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)

	   (1) mount ID: unique identifier of the mount (may be reused after umount)
	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
	   (3) major:minor: value of st_dev for files on filesystem
	   (4) root: root of the mount within the filesystem
	   (5) mount point: mount point relative to the process's root
	   (6) mount options: per mount options
	   (7) optional fields: zero or more fields of the form "tag[:value]"
	   (8) separator: marks the end of the optional fields
	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
	   (10) mount source: filesystem specific information or "none"
	   (11) super options: per super block options*/
	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable() ([]*Info, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f)
}

func parseInfoFile(r io.Reader) ([]*Info, error) {
	var (
		s   = bufio.NewScanner(r)
		out = []*Info{}
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		var (
			p              = &Info{}
			text           = s.Text()
			optionalFields string
		)

		if _, err := fmt.Sscanf(text, mountinfoFormat,
			&p.ID, &p.Parent, &p.Major, &p.Minor,
			&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
		}
		// Safe as mountinfo encodes mountpoints with spaces as \040.
		index := strings.Index(text, " - ")
		postSeparatorFields := strings.Fields(text[index+3:])
		if len(postSeparatorFields) < 3 {
			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
		}

		if optionalFields != "-" {
			p.Optional = optionalFields
		}

		p.Fstype = postSeparatorFields[0]
		p.Source = postSeparatorFields[1]
		p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
		out = append(out, p)
	}
	return out, nil
}

// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f)
}
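A short sketch of the exported entry point built on this parser, assuming the vendored import path; it simply lists the mounts visible to the current process as parsed from /proc/self/mountinfo, so it only produces useful output on Linux:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range infos {
		// Mountpoint, Fstype, and Source come straight from the Info struct above.
		fmt.Printf("%-30s %-10s %s\n", m.Mountpoint, m.Fstype, m.Source)
	}
}
```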
12 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go generated vendored Normal file
@ -0,0 +1,12 @@
// +build !windows,!linux,!freebsd freebsd,!cgo

package mount

import (
	"fmt"
	"runtime"
)

func parseMountTable() ([]*Info, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
6 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go generated vendored Normal file
@ -0,0 +1,6 @@
package mount

func parseMountTable() ([]*Info, error) {
	// Do NOT return an error!
	return nil, nil
}
69 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go generated vendored Normal file
@ -0,0 +1,69 @@
// +build linux

package mount

// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "shared")
}

// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rshared")
}

// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "private")
}

// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rprivate")
}

// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "slave")
}

// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rslave")
}

// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "unbindable")
}

// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "runbindable")
}

func ensureMountedAs(mountPoint, options string) error {
	mounted, err := Mounted(mountPoint)
	if err != nil {
		return err
	}

	if !mounted {
		if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
			return err
		}
	}
	if _, err = Mounted(mountPoint); err != nil {
		return err
	}

	return ForceMount("", mountPoint, "none", options)
}
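A hedged usage sketch of the helpers above, assuming the vendored import path; marking a tree rprivate before manipulating mounts inside it is the typical pattern in a container runtime, /mnt here is only an example mount point, and the call needs mount privileges to succeed:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Stop mount and unmount events under /mnt from propagating to or from
	// peer mount namespaces before setting up container-specific mounts there.
	if err := mount.MakeRPrivate("/mnt"); err != nil {
		log.Fatal(err)
	}
}
```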
191 vendor/github.com/docker/docker/pkg/pools/LICENSE generated vendored Normal file
@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2013-2016 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
119 vendor/github.com/docker/docker/pkg/pools/pools.go generated vendored Normal file
@ -0,0 +1,119 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools

import (
	"bufio"
	"io"
	"sync"

	"github.com/docker/docker/pkg/ioutils"
)

var (
	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
	BufioReader32KPool *BufioReaderPool
	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
	BufioWriter32KPool *BufioWriterPool
)

const buffer32K = 32 * 1024

// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
	pool sync.Pool
}

func init() {
	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	pool := sync.Pool{
		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
	}
	return &BufioReaderPool{pool: pool}
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bufPool.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := BufioReader32KPool.Get(src)
	written, err = io.Copy(dst, buf)
	BufioReader32KPool.Put(buf)
	return
}

// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(r, func() error {
		if readCloser, ok := r.(io.ReadCloser); ok {
			readCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}

// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
	pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
	pool := sync.Pool{
		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
	}
	return &BufioWriterPool{pool: pool}
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	buf := bufPool.pool.Get().(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.Writecloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
	return ioutils.NewWriteCloserWrapper(w, func() error {
		buf.Flush()
		if writeCloser, ok := w.(io.WriteCloser); ok {
			writeCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}
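A minimal sketch of using the shared 32K reader pool through pools.Copy, assuming the vendored import path github.com/docker/docker/pkg/pools; /etc/hostname is only an example source file:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src, err := os.Open("/etc/hostname") // example source file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// pools.Copy borrows a 32K bufio.Reader from BufioReader32KPool instead of
	// letting io.Copy allocate a fresh buffer for every call.
	n, err := pools.Copy(os.Stdout, src)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", n)
}
```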
Some files were not shown because too many files have changed in this diff.